diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..eedfbd2
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,6 @@
+[run]
+branch = True
+source = cinder
+
+[report]
+ignore_errors = True
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..963e589
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,58 @@
+*.py[cod]
+
+# C extensions
+*.so
+
+# Packages
+*.egg*
+*.egg-info
+dist
+build
+eggs
+parts
+bin
+var
+sdist
+develop-eggs
+.installed.cfg
+lib
+lib64
+
+# Installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+cover/
+.coverage*
+!.coveragerc
+.tox
+nosetests.xml
+.testrepository
+.venv
+
+# Translations
+*.mo
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# Complexity
+output/*.html
+output/*/index.html
+
+# Sphinx
+doc/build
+
+# pbr generates these
+AUTHORS
+ChangeLog
+
+# Editors
+*~
+.*.swp
+.*sw?
+
+# Files created by releasenotes build
+releasenotes/build
\ No newline at end of file
diff --git a/.idea/cinder-fusioncompute.iml b/.idea/cinder-fusioncompute.iml
new file mode 100644
index 0000000..3c9964a
--- /dev/null
+++ b/.idea/cinder-fusioncompute.iml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..9eabf49
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..d70bc09
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000..32d8044
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1478503875341
+
+
+ 1478503875341
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..516ae6f
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,3 @@
+# Format is:
+#
+#
diff --git a/.testr.conf b/.testr.conf
new file mode 100644
index 0000000..6d83b3c
--- /dev/null
+++ b/.testr.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
+ ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000..ba5329d
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,17 @@
+If you would like to contribute to the development of OpenStack, you must
+follow the steps in this page:
+
+ http://docs.openstack.org/infra/manual/developers.html
+
+If you already have a good understanding of how the system works and your
+OpenStack accounts are set up, you can skip to the development workflow
+section of this documentation to learn how changes to OpenStack should be
+submitted for review via the Gerrit tool:
+
+ http://docs.openstack.org/infra/manual/developers.html#development-workflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+ https://bugs.launchpad.net/cinder-fusioncompute
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 0000000..7d5c531
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,4 @@
+cinder-fusioncompute Style Commandments
+===============================================
+
+Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..68c771a
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..c978a52
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include AUTHORS
+include ChangeLog
+exclude .gitignore
+exclude .gitreview
+
+global-exclude *.pyc
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..416d6f2
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,19 @@
+===============================
+cinder-fusioncompute
+===============================
+
+Implementation of Cinder driver for Huawei Fusioncompute.
+
+This project provides a Cinder volume driver that integrates OpenStack Block
+Storage with the Huawei FusionCompute virtualization platform, allowing
+volumes to be created and managed on FusionCompute (VRM) datastores.
+
+* Free software: Apache license
+* Documentation: http://docs.openstack.org/developer/cinder-fusioncompute
+* Source: http://git.openstack.org/cgit/openstack/cinder-fusioncompute
+* Bugs: http://bugs.launchpad.net/cinder-fusioncompute
+
+Features
+--------
+
+* TODO
diff --git a/babel.cfg b/babel.cfg
new file mode 100644
index 0000000..15cd6cb
--- /dev/null
+++ b/babel.cfg
@@ -0,0 +1,2 @@
+[python: **.py]
+
diff --git a/cinder/__init__.py b/cinder/__init__.py
new file mode 100644
index 0000000..c69534f
--- /dev/null
+++ b/cinder/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pbr.version
+
+
+__version__ = pbr.version.VersionInfo(
+ 'cinder-fusioncompute').version_string()
diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/__init__.py b/cinder/tests/units/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/volume/__init__.py b/cinder/tests/units/volume/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/volume/drivers/__init__.py b/cinder/tests/units/volume/drivers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/volume/drivers/huawei/__init__.py b/cinder/tests/units/volume/drivers/huawei/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/volume/drivers/huawei/fusioncompute/__init__.py b/cinder/tests/units/volume/drivers/huawei/fusioncompute/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/tests/units/volume/drivers/huawei/fusioncompute/base.py b/cinder/tests/units/volume/drivers/huawei/fusioncompute/base.py
new file mode 100644
index 0000000..1c30cdb
--- /dev/null
+++ b/cinder/tests/units/volume/drivers/huawei/fusioncompute/base.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslotest import base
+
+
+class TestCase(base.BaseTestCase):
+
+ """Test case base class for all unit tests."""
diff --git a/cinder/tests/units/volume/drivers/huawei/fusioncompute/test_driver.py b/cinder/tests/units/volume/drivers/huawei/fusioncompute/test_driver.py
new file mode 100644
index 0000000..9021825
--- /dev/null
+++ b/cinder/tests/units/volume/drivers/huawei/fusioncompute/test_driver.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_cinder-fusioncompute
+----------------------------------
+
+Tests for `cinder-fusioncompute` module.
+"""
+
+from cinder.tests.units.volume.drivers.huawei.fusioncompute import base
+
+
+class TestCinder(base.TestCase):
+
+ def test_something(self):
+ pass
diff --git a/cinder/volume/drivers/huawei/__init__.py b/cinder/volume/drivers/huawei/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cinder/volume/drivers/huawei/fusioncompute/__init__.py b/cinder/volume/drivers/huawei/fusioncompute/__init__.py
new file mode 100644
index 0000000..ba86124
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+ init
+'''
diff --git a/cinder/volume/drivers/huawei/fusioncompute/base_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/base_proxy.py
new file mode 100644
index 0000000..c887bf5
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/base_proxy.py
@@ -0,0 +1,92 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+from six.moves.urllib import parse as urlparse
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder.volume.drivers.huawei.fusioncompute.conf import FC_DRIVER_CONF
+from cinder.volume.drivers.huawei.fusioncompute.http_client import VRMHTTPClient
+
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class BaseProxy(object):
+ '''BaseProxy
+
+ BaseProxy
+ '''
+ def __init__(self):
+
+ self.vrmhttpclient = VRMHTTPClient()
+ self.site_uri = self.vrmhttpclient.get_siteuri()
+ self.site_urn = self.vrmhttpclient.get_siteurn()
+ self.limit = 100
+ self.BASIC_URI = '/service'
+
+ def _joined_params(self, params):
+ '''_joined_params
+
+ :param params:
+ :return:
+ '''
+ param_str = []
+ for k, v in params.items():
+ if (k is None) or (v is None) or len(k) == 0:
+ continue
+ if k == 'scope' and v == self.site_urn:
+ continue
+ param_str.append("%s=%s" % (k, str(v)))
+ return '&'.join(param_str)
+
+ def _generate_url(self, path, query=None, frag=None):
+ '''_generate_url
+
+ :param path:
+ :param query:
+ :param frag:
+ :return:url
+ '''
+ if CONF.vrm_ssl:
+ scheme = 'https'
+ else:
+ scheme = 'http'
+ fc_ip = FC_DRIVER_CONF.fc_ip
+
+ netloc = str(fc_ip) + ':' + str(CONF.vrm_port)
+ if path.startswith(self.BASIC_URI):
+ url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
+ else:
+ url = urlparse.urlunsplit(
+ (scheme, netloc, self.BASIC_URI + str(path), query, frag))
+ return url
+
+
+
diff --git a/cinder/volume/drivers/huawei/fusioncompute/cluster_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/cluster_proxy.py
new file mode 100644
index 0000000..987248a
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/cluster_proxy.py
@@ -0,0 +1,88 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.fusioncompute.base_proxy import BaseProxy
+
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+
+
+class ClusterProxy(BaseProxy):
+ '''ClusterProxy
+
+ '''
+ def __init__(self):
+ super(ClusterProxy, self).__init__()
+
+ def list_cluster(self):
+ '''list_cluster
+
+ Get ?tag=xxx&clusterUrns=urn1&clusterUrns=urn2 HTTP/1.1
+ Host: https://:
+ Accept: application/json;version=; charset=UTF-8
+ X-Auth-Token:
+
+ :param:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start list_cluster()"))
+ uri = '/clusters'
+ method = 'GET'
+ path = self.site_uri + uri
+
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ clusters = body.get('clusters')
+
+ return clusters
+
+ def list_hosts(self, **kwargs):
+ '''list_hosts
+
+ Get ?limit=20&offset=0&scope=xxx HTTP/1.1
+ Host: https://:
+ Accept: application/json;version=; charset=UTF-8
+ X-Auth-Token:
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start list_host()"))
+ uri = '/hosts'
+ method = 'GET'
+ path = self.site_uri + uri
+ params = {
+ 'scope': kwargs.get('clusterUrn'),
+ }
+ appendix = self._joined_params(params)
+ new_url = self._generate_url(path, appendix)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ hosts = body.get('hosts')
+ return hosts
diff --git a/cinder/volume/drivers/huawei/fusioncompute/conf.py b/cinder/volume/drivers/huawei/fusioncompute/conf.py
new file mode 100644
index 0000000..b37a47a
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/conf.py
@@ -0,0 +1,110 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] CONFIG
+"""
+
+from oslo_config import cfg
+
+
+vrm_http_opts = [
+ cfg.StrOpt('vrm_version',
+ default='v6.0',
+ help='Management version of VRM'),
+ cfg.BoolOpt('vrm_ssl',
+ default=True,
+ help='Use SSL connection'),
+    cfg.IntOpt('vrm_port',
+               default=7443,
+               help='Management port of VRM'),
+ cfg.StrOpt('vrm_user',
+ default='gesysman',
+ help='User name for the VRM'),
+ cfg.StrOpt('vrm_password',
+ default='',
+ help='Password for the VRM'),
+ cfg.StrOpt('vrm_authtype',
+ default='0',
+               help='Authentication type for the VRM'),
+ cfg.StrOpt('vrm_usertype',
+ default='2',
+ help='User type for the VRM'),
+ cfg.IntOpt('vrm_retries',
+ default=3,
+ help='retry times for http request'),
+ cfg.IntOpt('vrm_snapshot_sleeptime',
+ default=300,
+ help='sleep times for retry request'),
+ cfg.IntOpt('vrm_vol_snapshot_retries',
+ default=3,
+ help='retry times for http request'),
+ cfg.IntOpt('vrm_timeout',
+ default=30000,
+ help='timeout(s) for task result'),
+ cfg.IntOpt('vrm_limit',
+ default=100,
+               help='limit per step to retrieve mass resources result set'),
+ cfg.IntOpt('vrm_sm_periodrate',
+ default=1,
+ help='timeout(s) for task result'),
+]
+VRM_group = cfg.OptGroup(name='VRM', title='VRM config')
+VRM_opts = [
+ cfg.StrOpt('fc_user',
+ help='FusionCompute user name'),
+ cfg.StrOpt('fc_pwd_for_cinder', secret=True,
+ help='FusionCompute user password'),
+ cfg.StrOpt('fc_ip',
+ help='Management IP of FusionCompute'),
+ cfg.StrOpt('fc_image_path',
+ help='NFS Image server path'),
+ cfg.StrOpt('glance_server_ip',
+ help='FusionSphere glance server ip'),
+ cfg.StrOpt('s3_store_access_key_for_cinder', secret=True,
+ help='FusionCompute uds image access key'),
+ cfg.StrOpt('s3_store_secret_key_for_cinder', secret=True,
+ help='FusionCompute uds image secret key'),
+ cfg.StrOpt('export_image_type',
+ help='FusionCompute export image type : nfs, glance, uds'),
+ cfg.StrOpt('uds_ip',
+ help='FusionSphere uds server ip'),
+ cfg.StrOpt('uds_port',
+ help='FusionSphere uds server port'),
+ cfg.StrOpt('uds_bucket_name',
+ help='FusionSphere uds server bucket name'),
+ cfg.StrOpt('uds_bucket_type',
+ help='FusionSphere uds server bucket type fixed or wildcard'),
+ cfg.ListOpt('vrm_ds_types',
+ default=['advanceSan', 'DSWARE', 'san', 'NAS', 'LUNPOME',
+ 'LOCAL', 'LOCALPOME'],
+                help='Datastore types supported by VRM'),
+ cfg.StrOpt('vrm_ds_hosts_share',
+ default='false',
+ help='FusionSphere support all host model'),
+ cfg.StrOpt('vrm_is_thin',
+ default='true',
+ help='FusionSphere create volume default type'),
+ cfg.StrOpt('export_version',
+ default='v6.0',
+ help='export version of VHD'),
+]
+CONF = cfg.CONF
+CONF.register_group(VRM_group)
+CONF.register_opts(VRM_opts, VRM_group)
+CONF.register_opts(vrm_http_opts)
+FC_DRIVER_CONF = CONF.VRM
+
+
diff --git a/cinder/volume/drivers/huawei/fusioncompute/datastore_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/datastore_proxy.py
new file mode 100644
index 0000000..b526107
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/datastore_proxy.py
@@ -0,0 +1,114 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+import json
+
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.fusioncompute.base_proxy import BaseProxy
+from cinder.volume.drivers.huawei.fusioncompute import exception as driver_exception
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+
+
+class DatastoreProxy(BaseProxy):
+ '''DatastoreProxy
+
+ DatastoreProxy
+ '''
+ def __init__(self):
+ super(DatastoreProxy, self).__init__()
+
+ def list_datastore(self, **kwargs):
+ '''list_datastore
+
+ :param kwargs:
+ :return:
+ '''
+
+ # LOG.info(_("[VRM-CINDER] start list_datastore()"))
+ uri = '/datastores'
+ method = 'GET'
+ path = self.site_uri + uri
+
+ offset = 0
+ datastores = []
+ while True:
+ parames = {'limit': self.limit,
+ 'offset': offset,
+ 'scope': kwargs.get('scope')}
+
+ appendix = self._joined_params(parames)
+ new_url = self._generate_url(path, appendix)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ total = int(body.get('total') or 0)
+ if total > 0:
+ res = body.get('datastores')
+ datastores += res
+ offset += len(res)
+ if offset >= total or len(datastores) >= total or len(
+ res) < self.limit:
+ break
+ else:
+ break
+
+ return datastores
+
+ def query_datastore(self, **kwargs):
+ '''Query DataStore
+
+ :param kwargs:
+ :return:
+ '''
+ # LOG.info(_("[VRM-CINDER] start list_datastore()"))
+ uri = '/datastores' + '/' + kwargs.get('id')
+ method = 'GET'
+ path = self.site_uri + uri
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ return body
+
+ def check_ds_connect_cluster(self, **kwargs):
+ '''Check cluster connected to the datastore
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start _check_ds_connect_cluster()"))
+ uri = '/datastores'
+ method = 'POST'
+ body = {'clusterUrn': kwargs.get('cluster_urn')}
+ path = self.site_uri + uri + '/' + kwargs.get(
+ 'datastore_id') + '/action/ifconcluster'
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ if_connect_flag = body.get('ifConnectFlag')
+ if if_connect_flag and if_connect_flag is True:
+ return
+ else:
+ raise driver_exception.NoNeededData()
diff --git a/cinder/volume/drivers/huawei/fusioncompute/exception.py b/cinder/volume/drivers/huawei/fusioncompute/exception.py
new file mode 100644
index 0000000..f36b88b
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/exception.py
@@ -0,0 +1,184 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] EXCEPTION
+"""
+
+from cinder import exception
+
+"""
+Exception definitions.
+"""
+
+
+class UnsupportedFeature(exception.CinderException):
+    '''Base error for features the VRM backend does not support.'''
+    pass
+
+
+class UnsupportedVersion(UnsupportedFeature):
+    '''Raised for API versions the backend does not support.'''
+    pass
+
+
+class UnsupportedCommand(UnsupportedFeature):
+    '''Raised for commands the backend does not support.'''
+    pass
+
+
+class AuthorizationFailure(exception.CinderException):
+    '''Raised when VRM authentication leaves required state unset.'''
+    pass
+
+
+class NoNeededData(exception.CinderException):
+    '''Raised when an expected resource/relation is absent in VRM.'''
+    pass
+
+
+class ClientException(exception.CinderException):
+    '''Base exception for VRM REST errors.
+
+    Carries the HTTP status code plus the VRM errorCode/errorDes
+    fields extracted from the response body.
+    '''
+
+    def __init__(self, code=None, message=None, error_code=None,
+                 error_des=None):
+        # code is the HTTP status; the attribute names errorCode and
+        # errorDes deliberately mirror the VRM response body fields.
+        self.code = code
+        self.errorCode = error_code
+        self.errorDes = error_des
+        if message:
+            self.message = message
+        else:
+            self.message = "client exception."
+        super(ClientException, self).__init__(self.message)
+
+    def __str__(self):
+        formatted_string = "%s (HTTP %s)" % (self.message, self.code)
+        if self.errorCode:
+            formatted_string += " (errorCode: %s)" % self.errorCode
+
+        if self.errorDes:
+            formatted_string += " (errorDes: %s)" % self.errorDes
+
+        return formatted_string
+
+
+class BadRequest(ClientException):
+    """HTTP 400 - Bad request: you sent some malformed data."""
+    http_status = 400
+    message = "Bad request"
+
+
+class Unauthorized(ClientException):
+    """HTTP 401 - Unauthorized: bad credentials."""
+    http_status = 401
+    message = "Unauthorized"
+
+
+class Forbidden(ClientException):
+    """HTTP 403 - Forbidden: your credentials don't give you access to
+    this resource.
+    """
+    http_status = 403
+    message = "Forbidden"
+
+
+class NotFound(ClientException):
+    """HTTP 404 - Not found."""
+    http_status = 404
+    message = "Not found"
+
+
+class OverLimit(ClientException):
+    """HTTP 413 - Over limit: you're over the API limits for this time
+    period.
+    """
+    http_status = 413
+    message = "Over limit"
+
+
+class HTTPNotImplemented(ClientException):
+    """HTTP 501 - Not Implemented: the server does not support this
+    operation.
+    """
+    http_status = 501
+    message = "Not Implemented"
+
+
+class FusionComputeDriverException(ClientException):
+    '''Generic driver-side failure, mapped to HTTP 500 semantics.'''
+    http_status = 500
+    message = "FusionCompute driver exception occurred."
+
+
+# Maps HTTP status codes to their ClientException subclass; codes not
+# listed here fall back to plain ClientException.
+_code_map = dict((c.http_status, c) for c in [BadRequest, Unauthorized,
+                                              Forbidden, NotFound,
+                                              OverLimit, HTTPNotImplemented])
+
+
+def exception_from_response(response, body):
+ """exception_from_response
+
+ Return an instance of an ClientException or subclass
+ based on an requests response.
+
+ Usage::
+
+ resp, body = requests.request(...)
+ if resp.status_code != 200:
+ raise exception_from_response(resp, rest.text)
+ """
+ cls = _code_map.get(response.status_code, ClientException)
+ if body:
+ error_code = body.get('errorCode', None)
+ error_des = body.get('errorDes', None)
+ return cls(code=response.status_code, errorCode=error_code,
+ errorDes=error_des)
+ else:
+ return cls(code=response.status_code)
diff --git a/cinder/volume/drivers/huawei/fusioncompute/host_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/host_proxy.py
new file mode 100644
index 0000000..5d2a76f
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/host_proxy.py
@@ -0,0 +1,77 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+
+from oslo_log import log as logging
+
+from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
+
+
+# VRM task lifecycle states (not referenced in this module; kept for
+# parity with the other proxy modules).
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+
+
+class HostProxy(BaseProxy):
+    '''REST proxy for VRM host resources.'''
+    def __init__(self):
+        super(HostProxy, self).__init__()
+
+    def list_host(self, **kwargs):
+        '''Return all hosts of the site, following VRM pagination.
+
+        :param kwargs: optional 'scope' filter forwarded to the API
+        :return: list of host dicts (empty when none are reported)
+        '''
+        # LOG.info(_("[VRM-CINDER] start list_host()"))
+        uri = '/hosts'
+        method = 'GET'
+        path = self.site_uri + uri
+
+        # Page through results until the reported total is reached or a
+        # short page signals the end.
+        offset = 0
+        hosts = []
+        while True:
+            parameters = {'limit': self.limit,
+                          'offset': offset,
+                          'scope': kwargs.get('scope')}
+
+            appendix = self._joined_params(parameters)
+            new_url = self._generate_url(path, appendix)
+            resp, body = self.vrmhttpclient.request(new_url, method)
+            total = int(body.get('total') or 0)
+            if total > 0:
+                res = body.get('hosts')
+                hosts += res
+                offset += len(res)
+                if offset >= total or len(hosts) >= total or len(
+                        res) < self.limit:
+                    break
+            else:
+                break
+
+        return hosts
+
+
diff --git a/cinder/volume/drivers/huawei/fusioncompute/http_client.py b/cinder/volume/drivers/huawei/fusioncompute/http_client.py
new file mode 100644
index 0000000..446deab
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/http_client.py
@@ -0,0 +1,399 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+import json
+import requests
+import urlparse
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.conf import FC_DRIVER_CONF
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+from cinder.volume.drivers.huawei.vrm import utils as apiutils
+
+try:
+ from eventlet import sleep
+except ImportError:
+ from time import sleep
+
+# VRM task lifecycle states (not referenced in this module; kept for
+# parity with the other proxy modules).
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class VRMHTTPClient(object):
+    """HTTP client for the VRM REST API: authentication, site lookup
+    and raw request dispatch on behalf of the proxy classes.
+    """
+
+    USER_AGENT = 'VRM-HTTP-Client for OpenStack'
+    RESOURCE_URI = 'uri'
+    TASK_URI = 'taskUri'
+    # All VRM resource paths live under this prefix.
+    BASIC_URI = '/service'
+    vrm_commands = None
+
+    def __init__(self):
+        '''Load connection settings from cinder / FC driver config.
+
+        Authentication state (version, token, site) stays None until
+        authenticate()/init() is called.
+
+        :return:
+        '''
+        fc_ip = FC_DRIVER_CONF.fc_ip
+        fc_image_path = FC_DRIVER_CONF.fc_image_path
+        fc_user = FC_DRIVER_CONF.fc_user
+        fc_pwd = FC_DRIVER_CONF.fc_pwd_for_cinder
+        self.ssl = CONF.vrm_ssl
+        self.host = fc_ip
+        self.port = CONF.vrm_port
+        self.user = fc_user
+        self.userType = CONF.vrm_usertype
+        # SHA-256 digest of the configured password; sent later as the
+        # X-Auth-Key header (see _prepare_auth_token).
+        self.password = apiutils.sha256_based_key(fc_pwd)
+        self.retries = CONF.vrm_retries
+        self.timeout = CONF.vrm_timeout
+        self.limit = CONF.vrm_limit
+        self.image_url = fc_image_path
+        self.image_type = '.xml'
+
+        # Populated lazily by authenticate().
+        self.versions = None
+        self.version = None
+        self.auth_uri = None
+        self.auth_url = None
+
+        self.auth_token = None
+
+        self.sites = None
+        self.site_uri = None
+        self.site_urn = None
+        self.site_url = None
+
+        self.shared_hosts = None
+        self.shared_datastores = None
+        self.shared_volumes = None
+
+ def _generate_url(self, path, query=None, frag=None):
+ '''_generate_url
+
+ _generate_url
+
+ :param path:
+ :param query:
+ :param frag:
+ :return:
+ '''
+ if CONF.vrm_ssl:
+ scheme = 'https'
+ else:
+ scheme = 'http'
+ fc_ip = FC_DRIVER_CONF.fc_ip
+
+ netloc = str(fc_ip) + ':' + str(CONF.vrm_port)
+ if path.startswith(self.BASIC_URI):
+ url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
+ else:
+ url = urlparse.urlunsplit(
+ (scheme, netloc, self.BASIC_URI + str(path), query, frag))
+ return url
+
+    def _http_log_req(self, args, kwargs):
+        '''Assemble a curl-style representation of an outgoing request.
+
+        NOTE(review): the assembled string_parts list is never logged or
+        returned, so this method is currently a no-op side-effect-wise;
+        presumably the final LOG call was removed on purpose.
+
+        :param args: (url, method) tuple
+        :param kwargs: request kwargs (headers, optional body)
+        :return: None
+        '''
+        string_parts = ['\n curl -i']
+        for element in args:
+            if element in ('GET', 'POST', 'DELETE', 'PUT'):
+                string_parts.append(' -X %s' % element)
+            else:
+                string_parts.append(' %s' % element)
+
+        for element in kwargs['headers']:
+            header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
+            string_parts.append(header)
+
+        if 'body' in kwargs:
+            string_parts.append(" -d '%s'" % (kwargs['body']))
+
+    def _http_log_resp(self, resp):
+        '''Log the status code of a non-200 response; never raises.
+
+        :param resp: requests response object
+        :return: None
+        '''
+        try:
+            if resp.status_code:
+                if int(resp.status_code) != 200:
+                    LOG.info(_("RESP status_code: [%s]"), resp.status_code)
+        except Exception:
+            LOG.info(_("[VRM-CINDER] _http_log_resp exception"))
+
+    def request(self, url, method, **kwargs):
+        '''Send a request with retry and re-authentication handling.
+
+        Wraps try_request(); on Unauthorized it re-authenticates (up to
+        10 times, only 3 for license error 10000040), on a 5xx
+        ClientException it retries up to self.retries times with a
+        doubling back-off (step: 2, 4, 8, ...), and on anything else it
+        re-raises.
+
+        :param url: absolute URL
+        :param method: HTTP verb
+        :param kwargs: forwarded to try_request (headers, body, ...)
+        :return: (resp, body) from try_request
+        :raises driver_exception.ClientException: on exhausted retries
+        '''
+        auth_attempts = 0
+        attempts = 0
+        step = 1
+        while True:
+            # Back-off doubles every pass through the loop.
+            step *= 2
+            attempts += 1
+            if not self.auth_url or not self.auth_token or not self.site_uri:
+                LOG.info(_("[VRM-CINDER] auth_url is none. "))
+
+            kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
+            try:
+                resp, body = self.try_request(url, method, **kwargs)
+                return resp, body
+
+            except driver_exception.Unauthorized as e:
+                LOG.error('[VRM-CINDER] error message is :%s' % e)
+                # 10000040 appears to be a VRM license error code;
+                # it gets a smaller re-auth budget.
+                if e.errorCode == '10000040':
+                    if auth_attempts > 2:
+                        LOG.error("license error.")
+                        raise driver_exception.ClientException(101)
+                if auth_attempts > 10:
+                    raise driver_exception.ClientException(101)
+                LOG.info("Unauthorized, reauthenticating.")
+                attempts -= 1
+                auth_attempts += 1
+                sleep(step)
+                self.authenticate()
+                continue
+            except driver_exception.ClientException as ex:
+                if attempts > self.retries:
+                    LOG.info(_("[VRM-CINDER] ClientException "))
+                    raise ex
+                # Only 5xx errors fall through to the retry sleep
+                # below; everything else is re-raised immediately.
+                if 500 <= ex.code <= 599:
+                    LOG.info(_("[VRM-CINDER] ClientException "))
+                else:
+                    LOG.info(_("[VRM-CINDER] ClientException "))
+                    raise ex
+            except requests.exceptions.ConnectionError as ex:
+                LOG.error("Connection Error: %s" % ex)
+                # NOTE(review): ex.message is a Python 2-only attribute.
+                LOG.error("Connection Error: %s" % ex.message)
+                raise ex
+            LOG.info(
+                "Failed attempt(%s of %s), retrying in %s seconds" %
+                (attempts, self.retries, step))
+            sleep(step)
+
+    def try_request(self, url, method, **kwargs):
+        '''Perform one HTTP request against VRM and decode the body.
+
+        Sets User-Agent, Accept (with API version unless this is a
+        session request or no version is known yet), X-Auth-Token and
+        Accept-Language headers, moves 'body' into requests' 'data',
+        and raises a mapped exception for status >= 400.
+
+        :param url: absolute URL
+        :param method: HTTP verb
+        :param kwargs: may carry 'headers' and a JSON string 'body'
+        :return: (resp, body) where body is the decoded JSON dict or
+            None when the payload is empty/undecodable
+        :raises driver_exception.ClientException: for status >= 400
+        '''
+
+        no_version = False
+        if not self.version:
+            no_version = True
+        if url.endswith('session'):
+            no_version = True
+
+        kwargs.setdefault('headers', kwargs.get('headers', {}))
+        kwargs['headers']['User-Agent'] = self.USER_AGENT
+        if no_version:
+            kwargs['headers']['Accept'] = 'application/json;charset=UTF-8'
+        else:
+            version = self.version.lstrip(' v')
+            # Export requests may be pinned to API version 1.2.
+            if url.endswith('/action/export'):
+                export_version = FC_DRIVER_CONF.export_version
+                version = '1.2' if export_version == 'v1.2' else \
+                    self.version.lstrip(' v')
+            kwargs['headers']['Accept'] = 'application/json;version=' + \
+                                          version + ';charset=UTF-8'
+        kwargs['headers']['X-Auth-Token'] = self.auth_token
+        kwargs['headers']['Accept-Language'] = 'en_US'
+        if 'body' in kwargs:
+            if kwargs['body'] and len(kwargs['body']) > 0:
+                kwargs['headers'][
+                    'Content-Type'] = 'application/json;charset=UTF-8'
+                kwargs['data'] = kwargs['body']
+
+                # Scrubbed copy, originally used for request logging.
+                body = apiutils.str_drop_password_key(kwargs['body'])
+                # LOG.info(_("[VRM-CINDER] request body [%s]"), body)
+            del kwargs['body']
+
+        self._http_log_req((url, method,), kwargs)
+        # NOTE(review): verify=False disables TLS certificate
+        # validation for every VRM request -- confirm this is intended.
+        resp = requests.request(
+            method,
+            url,
+            verify=False,
+            **kwargs)
+        self._http_log_resp(resp)
+
+        # 'body' is reused here for the decoded response payload.
+        if resp.content:
+            try:
+                body = json.loads(resp.content)
+            except ValueError:
+                body = None
+        else:
+            body = None
+# LOG.info(_("[VRM-CINDER] request status_code [%d]"), resp.status_code)
+        if resp.status_code >= 400:
+            LOG.error(_("error response, error is %s"), body)
+            raise driver_exception.exception_from_response(resp, body)
+
+        return resp, body
+
+    def _prepare_version_and_auth_url(self):
+        '''Initialise API version and the session (auth) URL from config.
+
+        :return: None; sets self.version, self.auth_uri, self.auth_url
+        '''
+        self.version = CONF.vrm_version
+        self.auth_uri = '/service/session'
+        self.auth_url = self._generate_url(self.auth_uri)
+
+    def _prepare_auth_token(self):
+        '''Create a VRM session and capture the auth token.
+
+        POSTs credentials to /service/session; on 200/204 stores the
+        'x-auth-token' response header.  On any other success code the
+        previous token (possibly None) is silently kept.
+
+        :return: None; sets self.auth_token
+        '''
+        uri = '/service/session'
+        new_url = self._generate_url(uri)
+        # self.auth_token = None
+        headers = {'X-Auth-User': self.user,
+                   'X-Auth-Key': self.password,
+                   'X-Auth-UserType': self.userType, }
+        resp, body = self.try_request(new_url, 'POST', headers=headers)
+        if resp.status_code in (200, 204):
+            self.auth_token = resp.headers['x-auth-token']
+
+    def _prepare_site_uri(self):
+        '''Resolve the site this client talks to.
+
+        A single site is selected automatically; with several sites the
+        one matching FC_DRIVER_CONF.vrm_siteurn is used.
+
+        :return: None; sets self.site_uri/site_urn/site_url
+        :raises driver_exception.NotFound: when no site matches
+        '''
+        self.site_uri = self.site_urn = self.site_url = None
+        url = self._generate_url('/sites')
+        headers = {'X-Auth-Token': self.auth_token}
+
+        resp, body = self.try_request(url, 'GET', headers=headers)
+        if resp.status_code in (200, 204):
+            self.sites = body['sites']
+            if len(self.sites) == 1:
+                self.site_uri = self.sites[0]['uri']
+                self.site_urn = self.sites[0]['urn']
+                self.site_url = self._generate_url(self.site_uri)
+                return
+            else:
+                for si in self.sites:
+                    if si['urn'] == FC_DRIVER_CONF.vrm_siteurn:
+                        self.site_uri = si['uri']
+                        self.site_urn = si['urn']
+                        self.site_url = self._generate_url(self.site_uri)
+                        return
+
+        raise driver_exception.NotFound()
+
+    def authenticate(self):
+        '''Run the full login sequence: version, token, then site.
+
+        :return: None
+        :raises driver_exception.AuthorizationFailure: when any of
+            version, auth_url or site_uri is still unset afterwards
+        '''
+        self._prepare_version_and_auth_url()
+        self._prepare_auth_token()
+        self._prepare_site_uri()
+
+        if not self.version:
+            LOG.info(_("[VRM-CINDER] (%s)"), 'AuthorizationFailure')
+            raise driver_exception.AuthorizationFailure
+        if not self.auth_url:
+            LOG.info(_("[VRM-CINDER] (%s)"), 'AuthorizationFailure')
+            raise driver_exception.AuthorizationFailure
+        if not self.site_uri:
+            LOG.info(_("[VRM-CINDER] (%s)"), 'AuthorizationFailure')
+            raise driver_exception.AuthorizationFailure
+
+    def get_version(self):
+        '''Return the configured API version (None before authenticate).
+
+        :return: version string or None
+        '''
+        return self.version
+
+    def get_siteurn(self):
+        '''Return the site URN, authenticating lazily if needed.
+
+        :return: site URN string
+        '''
+        if self.site_uri is None:
+            self.init()
+        return self.site_urn
+
+    def get_siteuri(self):
+        '''Return the site URI, authenticating lazily if needed.
+
+        :return: site URI string
+        '''
+        if self.site_uri is None:
+            self.init()
+        return self.site_uri
+
+    def init(self):
+        '''Authenticate against VRM (thin wrapper over authenticate()).
+
+        :return: None
+        '''
+        LOG.info(_("[VRM-CINDER] start init()"))
+        self.authenticate()
+
+
+
+
diff --git a/cinder/volume/drivers/huawei/fusioncompute/task_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/task_proxy.py
new file mode 100644
index 0000000..aa9c5da
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/task_proxy.py
@@ -0,0 +1,130 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+from cinder.volume.drivers.huawei.vrm.utils import Delete_Snapshot_Code
+
+try:
+ from eventlet import sleep
+except ImportError:
+ from time import sleep
+
+# VRM task lifecycle states, matched against the 'status' field of the
+# task query response in wait_task().
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class TaskProxy(BaseProxy):
+    '''REST proxy that polls asynchronous VRM tasks to completion.'''
+    def __init__(self):
+        super(TaskProxy, self).__init__()
+
+ def wait_task(self, isShortQuery=0, **kwargs):
+ '''wait_task
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start wait_task()"))
+
+ task_uri = kwargs.get('task_uri')
+ method = 'GET'
+ if task_uri is None:
+ LOG.info(_("[VRM-CINDER] task_uri is none."))
+ raise driver_exception.ClientException(101)
+ else:
+ new_url = self._generate_url(task_uri)
+ retry = 0
+ error_num = 0
+ while retry < int(CONF.vrm_timeout):
+ if isShortQuery == 0:
+ if retry > 10:
+ retry += 10
+ sleep(10)
+ else:
+ retry += 3
+ sleep(3)
+ else:
+ if retry > 10:
+ retry += 10
+ sleep(10)
+ else:
+ retry += 1
+ sleep(1)
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ except Exception as ex:
+ LOG.info(_("[VRM-CINDER] querytask request exception."))
+ error_num += 1
+ if 30 < error_num:
+ LOG.info(
+ _("[VRM-CINDER] querytask request exception."))
+ raise ex
+ else:
+ continue
+
+ if body:
+ status = body.get('status')
+ if status in [TASK_WAITING, TASK_RUNNING]:
+
+ continue
+ elif status in [TASK_SUCCESS]:
+ LOG.info(
+ _("[VRM-CINDER] return TASK_SUCCESS wait_task()"))
+ return status
+ elif status in [TASK_FAILED, TASK_CANCELLING]:
+ LOG.info(
+ _("[VRM-CINDER] return TASK_FAILED wait_task()"))
+ if body.get('reason') in Delete_Snapshot_Code:
+ raise driver_exception.ClientException(
+ code=101,
+ message=body.get('reasonDes'),
+ errorCode=body.get('reason'))
+ raise driver_exception.ClientException(101)
+ else:
+ LOG.info(_("[VRM-CINDER] pass wait_task()"))
+ error_num += 1
+ if 30 < error_num:
+ raise driver_exception.ClientException(101)
+ else:
+ continue
+ else:
+ LOG.info(_("[VRM-CINDER] body is none."))
+ error_num += 1
+ if 30 < error_num:
+ raise driver_exception.ClientException(101)
+ else:
+ continue
+
+
diff --git a/cinder/volume/drivers/huawei/fusioncompute/utils.py b/cinder/volume/drivers/huawei/fusioncompute/utils.py
new file mode 100644
index 0000000..42b6d7e
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/utils.py
@@ -0,0 +1,92 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ FC Driver utils function
+"""
+import ast
+import hashlib
+import json
+import sys
+import traceback
+
+from cinder.i18n import _
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def log_exception(exception=None):
+ """log_exception
+
+ :param exception:
+ :return:
+ """
+
+ if exception:
+ pass
+
+ etype, value, track_tb = sys.exc_info()
+ error_list = traceback.format_exception(etype, value, track_tb)
+ for error_info in error_list:
+ LOG.error(error_info)
+
+
+def str_drop_password_key(str_data):
+ """str_drop_password_key
+
+ remove json password key item
+ :param data:
+ :return:
+ """
+
+ dict_data = eval(str_data)
+ if isinstance(dict_data, dict):
+ drop_password_key(dict_data)
+ return str(dict_data)
+ else:
+ LOG.info(
+ _("[BRM-DRIVER] str_data can't change to dict, str_data:(%s) "),
+ str_data)
+ return
+
+
+def drop_password_key(data):
+ """remove json password key item
+
+ :param data:
+ :return:
+ """
+ encrypt_list = ['password', 'vncpassword', 'oldpassword',
+ 'domainpassword', 'vncoldpassword', 'vncnewpassword',
+ 'auth_token', 'token', 'fc_pwd', 'accessKey',
+ 'secretKey']
+ for key in data.keys():
+ if key in encrypt_list:
+ del data[key]
+ elif data[key] and isinstance(data[key], dict):
+ drop_password_key(data[key])
+
+
+def sha256_based_key(key):
+    """Return the SHA-256 hex digest of key.
+
+    Used to derive the X-Auth-Key value from the configured password.
+    NOTE(review): on Python 3 hashlib.update() requires bytes, so key
+    would need encoding; this code base appears to target Python 2.
+
+    :param key: input string
+    :return: 64-character hex digest string
+    """
+    hash_ = hashlib.sha256()
+    hash_.update(key)
+    return hash_.hexdigest()
+
+Delete_Snapshot_Code = ['10400004']
diff --git a/cinder/volume/drivers/huawei/fusioncompute/vm_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/vm_proxy.py
new file mode 100644
index 0000000..d2ca9df
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/vm_proxy.py
@@ -0,0 +1,1168 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+import json
+import threading
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
+from cinder.volume.drivers.huawei.vrm.conf import FC_DRIVER_CONF
+from cinder.volume.drivers.huawei.vrm.task_proxy import TaskProxy
+
+try:
+ from eventlet import sleep
+except ImportError:
+ from time import sleep
+
+# VRM task lifecycle states (not referenced in the visible part of
+# this module; kept for parity with the other proxy modules).
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+# Module-wide lock; its usage is not visible in this chunk --
+# presumably serializes template create/clone operations.
+vrm_template_lock = threading.Lock()
+
+
+class VmProxy(BaseProxy):
+ '''VmProxy
+
+ VmProxy
+ '''
+    def __init__(self, *args, **kwargs):
+        # NOTE(review): *args/**kwargs are accepted but not forwarded
+        # to BaseProxy -- confirm they are intentionally ignored.
+        super(VmProxy, self).__init__()
+        self.task_proxy = TaskProxy()
+
+    def query_vm(self, **kwargs):
+        '''Fetch one VM by id via GET /vms/<vm_id>.
+
+        :param kwargs: 'vm_id' is required
+        :return: decoded response body (VM dict)
+        '''
+        LOG.info(_("[VRM-CINDER] start query_vm()"))
+        uri = '/vms'
+        method = 'GET'
+        path = self.site_uri + uri + '/' + kwargs.get('vm_id')
+        new_url = self._generate_url(path)
+        resp, body = self.vrmhttpclient.request(new_url, method)
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return body
+
+    def query_vm_by_uri(self, **kwargs):
+        '''Fetch one VM by its full resource URI.
+
+        :param kwargs: 'vm_uri' is required (complete VRM path)
+        :return: decoded response body (VM dict)
+        '''
+        LOG.info(_("[VRM-CINDER] start query_vm()"))
+        method = 'GET'
+        path = kwargs.get('vm_uri')
+        new_url = self._generate_url(path)
+        resp, body = self.vrmhttpclient.request(new_url, method)
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return body
+
+    def delete_vm(self, **kwargs):
+        '''Delete a VM and wait for the asynchronous task to finish.
+
+        When kwargs['isReserveDisks'] == 1 the VM's disks are kept
+        (query parameter isReserveDisks=1) and the task is polled with
+        the short polling interval.
+
+        :param kwargs: 'vm_id' required; 'isReserveDisks' optional
+        :return: None
+        '''
+        LOG.info(_("[VRM-CINDER] start delete_vm()"))
+        uri = '/vms'
+        method = 'DELETE'
+        path = self.site_uri + uri + '/' + kwargs.get('vm_id')
+        if kwargs.get('isReserveDisks') is not None and kwargs.get(
+                'isReserveDisks') == 1:
+            path = path + '?isReserveDisks=1'
+        new_url = self._generate_url(path)
+        resp, body = self.vrmhttpclient.request(new_url, method)
+        task_uri = body.get('taskUri')
+        if kwargs.get('isReserveDisks') is not None and kwargs.get(
+                'isReserveDisks') == 1:
+            self.task_proxy.wait_task(task_uri=task_uri, isShortQuery=1)
+        else:
+            self.task_proxy.wait_task(task_uri=task_uri)
+        LOG.info(_("[VRM-CINDER] end ()"))
+
+ def detach_vol_from_vm(self, **kwargs):
+ '''detach_vol_from_vm
+
+ 'detach_vol_from_vm': ('POST',
+ ('/vms', None, kwargs.get('vm_id'),
+ 'action/detachvol'),
+ {},
+ {'volUrn': kwargs.get('volUrn')},
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start detach_vol_from_vm()"))
+ uri = '/vms'
+ method = 'POST'
+ path = self.site_uri + uri + '/' + kwargs.get(
+ 'vm_id') + '/action/detachvol'
+ new_url = self._generate_url(path)
+ body = {'volUrn': kwargs.get('volume_urn')}
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ LOG.info(_("[VRM-CINDER] end ()"))
+
+    def attach_vol_to_vm(self, **kwargs):
+        '''Attach a volume to a VM and wait for the task to finish.
+
+        POSTs /vms/<vm_id>/action/attachvol with the volume URN taken
+        from kwargs['volume_urn'].
+
+        :param kwargs: 'vm_id' and 'volume_urn' are required
+        :return: None
+        '''
+        LOG.info(_("[VRM-CINDER] start attach_vol_to_vm()"))
+        uri = '/vms'
+        method = 'POST'
+        path = self.site_uri + uri + '/' + kwargs.get(
+            'vm_id') + '/action/attachvol'
+        new_url = self._generate_url(path)
+        # 'body' first holds the request payload, then the response.
+        body = {'volUrn': kwargs.get('volume_urn')}
+        resp, body = self.vrmhttpclient.request(new_url, method,
+                                                body=json.dumps(body))
+        task_uri = body.get('taskUri')
+        self.task_proxy.wait_task(task_uri=task_uri)
+        LOG.info(_("[VRM-CINDER] end attach_vol_to_vm()"))
+
+    def stop_vm(self, **kwargs):
+        '''Stop a VM and wait for the task to finish.
+
+        POSTs /vms/<vm_id>/action/stop with the requested stop 'mode'.
+
+        :param kwargs: 'vm_id' required; 'mode' optional stop mode
+        :return: None
+        '''
+        LOG.info(_("[VRM-CINDER] start stop_vm()"))
+        uri = '/vms'
+        method = 'POST'
+        path = self.site_uri + uri + '/' + kwargs.get('vm_id') + '/action/stop'
+        new_url = self._generate_url(path)
+        body = {'mode': kwargs.get('mode')}
+        resp, body = self.vrmhttpclient.request(new_url, method,
+                                                body=json.dumps(body))
+        task_uri = body.get('taskUri')
+        self.task_proxy.wait_task(task_uri=task_uri)
+        LOG.info(_("[VRM-CINDER] end ()"))
+
+ def _combine_empty_vmConfig(self, **kwargs):
+ '''_combine_empty_vmConfig
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start _combine_empty_vmConfig ()"))
+ cpu_quantity = 2
+ mem_quantityMB = 1024
+
+ cpu = {'quantity': cpu_quantity}
+ memory = {'quantityMB': mem_quantityMB}
+ disks = []
+ vmConfigBody = {
+ 'cpu': cpu,
+ 'memory': memory,
+ 'disks': disks,
+ }
+ LOG.info(_("[VRM-CINDER] _combine_empty_vmConfig end ()"))
+ return vmConfigBody
+
+    def _combine_vmConfig_4_import(self, **kwargs):
+        '''Build the vmConfig body for an image-import operation.
+
+        Single IDE disk on the given datastore; linked clones get an
+        empty volumeUuid (sized from the image when quick_start is
+        True), otherwise the cinder volume id/size is used.
+
+        :param kwargs: ds_urn, volume_urn, volume_size, volume_id,
+            is_thin, linkClone, quick_start, image_size,
+            volume_sequence_num (defaults to 1)
+        :return: vmConfig dict with 'cpu', 'memory' and one disk
+        '''
+        LOG.info(_("[VRM-CINDER] start _combine_vmConfig ()"))
+        cpu_quantity = 2
+        mem_quantityMB = 1024
+        datastoreUrn = kwargs.get('ds_urn')
+        if kwargs.get('volume_sequence_num') is None:
+            kwargs['volume_sequence_num'] = 1
+        link = kwargs.get('linkClone')
+        disk_quantityGB = kwargs.get('volume_size')
+        if link:
+            # Linked clone: no backing cinder volume uuid; quick-start
+            # sizes the disk from the image instead of the volume.
+            uuid = ""
+            if kwargs.get('quick_start') is True:
+                disk_quantityGB = kwargs.get('image_size')
+        else:
+            uuid = kwargs.get("volume_id")
+
+        thin = kwargs.get('is_thin')
+        volumeUrn = kwargs.get('volume_urn')
+        cpu = {'quantity': cpu_quantity}
+        memory = {'quantityMB': mem_quantityMB}
+        disks = [
+            {
+                'volumeUrn': volumeUrn,
+                'datastoreUrn': datastoreUrn,
+                'quantityGB': disk_quantityGB,
+                'volType': 0,
+                'sequenceNum': kwargs.get('volume_sequence_num'),
+                'pciType': 'IDE',
+                'volumeUuid': uuid,
+                'isThin': thin,
+                'isCoverData': True
+            }
+        ]
+        vmConfigBody = {
+            'cpu': cpu,
+            'memory': memory,
+            'disks': disks,
+        }
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return vmConfigBody
+
+    def _combine_vmConfig_4_clone(self, **kwargs):
+        '''Build the vmConfig body for a clone operation.
+
+        :param kwargs: ds_urn, volume_size, volume_id, is_thin,
+            linkClone, volume_sequence_num
+        :return: vmConfig dict with 'cpu', 'memory' and one disk
+        '''
+        LOG.info(_("[VRM-CINDER] start _combine_vmConfig ()"))
+        cpu_quantity = 2
+        mem_quantityMB = 1024
+        datastoreUrn = kwargs.get('ds_urn')
+        # NOTE(review): volume_sequence_num is defaulted here but the
+        # disk below hard-codes sequenceNum 1 (unlike the import
+        # variant) -- confirm whether that is intentional.
+        if kwargs.get('volume_sequence_num') is None:
+            kwargs['volume_sequence_num'] = 1
+        disk_quantityGB = kwargs.get('volume_size')
+        link = kwargs.get('linkClone')
+        if link:
+            uuid = ""
+        else:
+            uuid = kwargs.get("volume_id")
+
+        thin = kwargs.get('is_thin')
+        cpu = {'quantity': cpu_quantity}
+        memory = {'quantityMB': mem_quantityMB}
+        disks = [
+            {
+                'datastoreUrn': datastoreUrn,
+                'quantityGB': disk_quantityGB,
+                'volType': 0,
+                'sequenceNum': 1,
+                'pciType': 'IDE',
+                'volumeUuid': uuid,
+                'isThin': thin
+            }
+        ]
+        vmConfigBody = {
+            'cpu': cpu,
+            'memory': memory,
+            'disks': disks,
+        }
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return vmConfigBody
+
+    def _combine_vmConfig_4_export(self, **kwargs):
+        '''Build the vmConfig body for an export operation.
+
+        Only the disk sequence number is relevant for export; cpu and
+        memory are placeholder values.
+
+        :param kwargs: volume_sequence_num (defaults to 1)
+        :return: vmConfig dict with 'cpu', 'memory' and one disk stub
+        '''
+        LOG.info(_("[VRM-CINDER] start _combine_vmConfig ()"))
+        cpu_quantity = 2
+        mem_quantityMB = 1024
+        if kwargs.get('volume_sequence_num') is None:
+            kwargs['volume_sequence_num'] = 1
+        cpu = {'quantity': cpu_quantity}
+        memory = {'quantityMB': mem_quantityMB}
+        disks = [
+            {
+                'sequenceNum': kwargs.get('volume_sequence_num'),
+            }
+        ]
+        vmConfigBody = {
+            'cpu': cpu,
+            'memory': memory,
+            'disks': disks,
+        }
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return vmConfigBody
+
+    def _combine_os_options(self, **kwargs):
+        '''Build the osOptions body for VM import.
+
+        NOTE(review): osType/osVersion are hard-coded (Windows / 26)
+        and kwargs is ignored -- confirm the guest OS never matters for
+        these temporary import VMs.
+
+        :param kwargs: unused
+        :return: osOptions dict
+        '''
+        osOptions = {
+            'osType': 'Windows',
+            'osVersion': 26
+        }
+        LOG.info(_("[VRM-CINDER] end ()"))
+        return osOptions
+
+    def clone_vm(self, **kwargs):
+        '''Clone a VM/template and wait for the task (short polling).
+
+        POSTs /vms/<template_id>/action/clone; the new VM is named
+        'cinder-vm-<volume_id>' and is not auto-booted.
+
+        :param kwargs: template_id, volume_id, linked_clone (optional,
+            defaults to False) plus the _combine_vmConfig_4_clone args
+        :return: URN of the cloned VM (from the task response)
+        '''
+
+        LOG.info(_("[VRM-CINDER] start clone_vm()"))
+        uri = '/vms'
+        method = 'POST'
+        path = self.site_uri + uri + '/' + kwargs.get(
+            'template_id') + '/action/clone'
+        new_url = self._generate_url(path)
+
+        linked_clone = kwargs.get('linked_clone')
+        if linked_clone is None:
+            linked_clone = False
+        LOG.info(_("[VRM-CINDER] start clone_vm()"))
+        # 'body' first holds the request payload, then the response.
+        body = {
+            'name': 'cinder-vm-' + kwargs.get('volume_id'),
+            'group': 'FSP',
+            'description': 'cinder-driver-temp-vm',
+            'autoBoot': 'false',
+            'isLinkClone': linked_clone,
+            'vmConfig': self._combine_vmConfig_4_clone(**kwargs),
+        }
+        resp, body = self.vrmhttpclient.request(new_url, method,
+                                                body=json.dumps(body))
+
+        task_uri = body.get('taskUri')
+        self.task_proxy.wait_task(task_uri=task_uri, isShortQuery=1)
+        LOG.info(_("[VRM-CINDER] end clone_vm()"))
+        return body.get('urn')
+
+ def import_vm_from_glance(self, **kwargs):
+ '''import_vm_from_glance
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start import_vm_from_glance()"))
+ uri = self.site_uri + '/vms/action/import'
+ method = 'POST'
+ new_url = self._generate_url(uri)
+ link_nfs = kwargs.get('linkClone')
+ if link_nfs:
+ name = kwargs.get('image_id')
+ else:
+ name = kwargs.get('volume_id')
+
+ is_template = kwargs.get("is_template")
+ if is_template is None or is_template:
+ template = 'true'
+ else:
+ template = 'false'
+
+ if CONF.glance_host is None or str(CONF.glance_port) is None \
+ or FC_DRIVER_CONF.glance_server_ip is None:
+ raise exception.ParameterNotFound(param='glance_host or '
+ 'glance_port or glance_ip')
+
+ endpoint = CONF.glance_host + ":" + str(CONF.glance_port)
+ token = kwargs.get('auth_token')
+ serviceIp = FC_DRIVER_CONF.glance_server_ip
+ body = {
+ 'name': 'cinder-vm-' + name,
+ 'group': 'FSP',
+ 'description': 'cinder-glance-vm',
+ 'autoBoot': 'false',
+ 'location': kwargs.get("cluster_urn"),
+ 'osOptions': self._combine_os_options(**kwargs),
+ 'protocol': "glance",
+ 'vmConfig': self._combine_vmConfig_4_import(**kwargs),
+ 'isTemplate': template,
+ 'glanceConfig': {
+ 'imageID': kwargs.get('image_id'),
+ 'endPoint': endpoint,
+ 'serverIp': serviceIp,
+ 'token': token}
+ }
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ LOG.info(_("[VRM-CINDER] end import_vm_from_glance()"))
+ return body.get('urn')
+
    def import_vm_from_uds(self, **kwargs):
        '''Import a VM (or template) from a UDS/S3 image location.

        :param kwargs: expects 'image_location' formatted as
            "serverIp:port:bucketName:key", 'image_id'/'volume_id',
            'cluster_urn'; optional 'linkClone' and 'is_template'.
        :return: URN of the imported VM.
        :raises: exception.ParameterNotFound, exception.ImageUnacceptable
        '''
        LOG.info(_("[VRM-CINDER] start import_vm_from_uds()"))
        uri = self.site_uri + '/vms/action/import'
        method = 'POST'
        new_url = self._generate_url(uri)
        # Linked-clone imports are named after the image, others after
        # the volume.
        link_nfs = kwargs.get('linkClone')
        if link_nfs:
            name = kwargs.get('image_id')
        else:
            name = kwargs.get('volume_id')

        # Default to importing as a template when the flag is omitted.
        is_template = kwargs.get("is_template")
        if is_template is None or is_template:
            template = 'true'
        else:
            template = 'false'

        if FC_DRIVER_CONF.s3_store_access_key_for_cinder is None or \
                FC_DRIVER_CONF.s3_store_secret_key_for_cinder is None:
            LOG.error(_("[VRM-CINDER] some params is None, please check: "
                        "s3_store_access_key_for_cinder, "
                        "s3_store_secret_key_for_cinder"))
            raise exception.ParameterNotFound(
                param='s3_store_access_key_for_cinder or '
                      's3_store_secret_key_for_cinder')

        uds_name = FC_DRIVER_CONF.s3_store_access_key_for_cinder
        uds_password = FC_DRIVER_CONF.s3_store_secret_key_for_cinder
        # image_location must be exactly "serverIp:port:bucketName:key".
        location = kwargs.get('image_location')
        location = location.split(":")
        if len(location) != 4:
            msg = _('image_location is invalid')
            LOG.error(msg)
            raise exception.ImageUnacceptable(image_id=kwargs.get('image_id'),
                                              reason=msg)
        serverIp = location[0].strip()
        port = location[1].strip()
        bucketName = location[2].strip()
        key = location[3].strip()

        body = {
            'name': 'cinder-vm-' + name,
            'group': 'FSP',
            'description': 'cinder-uds-vm',
            'autoBoot': 'false',
            'location': kwargs.get("cluster_urn"),
            'osOptions': self._combine_os_options(**kwargs),
            'protocol': "uds",
            'vmConfig': self._combine_vmConfig_4_import(**kwargs),
            'isTemplate': template,
            's3Config': {
                'serverIp': serverIp,
                'port': port,
                'accessKey': uds_name,
                'secretKey': uds_password,
                'bucketName': bucketName,
                'key': key
            }
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end import_vm_from_uds()"))
        return body.get('urn')
+
    def import_vm_from_nfs(self, **kwargs):
        '''Import a VM (or template) from an NFS url via /vms/action/import.

        :param kwargs: expects 'image_location' (nfs url),
            'image_id'/'volume_id', 'cluster_urn'; optional 'linkClone'
            and 'is_template'.
        :return: URN of the imported VM.
        '''

        LOG.info(_("[VRM-CINDER] start import_vm_from_nfs()"))
        uri = self.site_uri + '/vms/action/import'
        method = 'POST'
        new_url = self._generate_url(uri)
        # Linked-clone imports are named after the image, others after
        # the volume.
        link_nfs = kwargs.get('linkClone')
        if link_nfs:
            name = kwargs.get('image_id')
        else:
            name = kwargs.get('volume_id')

        # Default to importing as a template when the flag is omitted.
        is_template = kwargs.get("is_template")
        if is_template is None or is_template:
            template = 'true'
        else:
            template = 'false'

        body = \
            {
                'name': 'cinder-vm-' + name,
                'group': 'FSP',
                'description': 'cinder-nfs-vm',
                'autoBoot': 'false',
                'location': kwargs.get("cluster_urn"),
                'osOptions': self._combine_os_options(**kwargs),
                'protocol': "nfs",
                'vmConfig': self._combine_vmConfig_4_import(**kwargs),
                'url': kwargs.get('image_location'),
                'isTemplate': template
            }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end import_vm_from_nfs()"))
        return body.get('urn')
+
    def create_volume_from_extend(self, **kwargs):
        '''Create a volume by importing an image into a temp VM.

        Imports via nfs/uds/glance depending on 'image_type', picks the
        disk whose sequenceNum matches 'volume_sequence_num' (default 1),
        detaches it and deletes the temporary VM.

        :param kwargs: see the import_vm_from_* helpers.
        :return: URN of the detached volume.
        :raises: exception.ImageUnacceptable when no matching disk exists.
        '''
        LOG.info(_("[VRM-CINDER] start create_volume_from_extend()"))
        image_type = kwargs.get('image_type')

        if image_type == "nfs":
            LOG.info(_("[VRM-CINDER] start create_volume_from_nfs"))
            vm_urn = self.import_vm_from_nfs(**kwargs)
        elif image_type == 'uds':
            LOG.info(_("[VRM-CINDER] start create_volume_from_uds"))
            vm_urn = self.import_vm_from_uds(**kwargs)
        else:
            LOG.info(_("[VRM-CINDER] start create_volume_from_glance"))
            vm_urn = self.import_vm_from_glance(**kwargs)

        # FC VM ids are the last 10 characters of the URN.
        vm_id = vm_urn[-10:]
        vm = self.query_vm(vm_id=vm_id)
        vm_config = vm['vmConfig']
        disks = vm_config['disks']
        volume_urn = None
        if kwargs.get('volume_sequence_num') is None:
            kwargs['volume_sequence_num'] = 1
        for disk in disks:
            if int(disk['sequenceNum']) == int(kwargs['volume_sequence_num']):
                volume_urn = disk['volumeUrn']
                break
        if volume_urn is None:
            msg = (_("[VRM-CINDER] no available disk"))
            LOG.error(msg)
            self.delete_vm(vm_id=vm_id)
            raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                              reason=msg)
        try:
            self.detach_vol_from_vm(vm_id=vm_id, volume_urn=volume_urn)
        except Exception as ex:
            # Clean up the temp VM before propagating the failure.
            LOG.error(_('detach volume is failed'))
            self.delete_vm(vm_id=vm_id)
            raise ex

        self.delete_vm(vm_id=vm_id)
        LOG.info(_("[VRM-CINDER] end ()"))
        return volume_urn
+
+ def check_template_status(self, vm_id):
+ LOG.info(_("[VRM-CINDER] start check template status()"))
+ uri = self.site_uri + '/vms/' + vm_id + '/competition'
+ method = 'GET'
+ new_url = self._generate_url(uri)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ LOG.info(_("[VRM-CINDER] template info %s") % body)
+ body.get('status')
+
+ def check_template(self, **kwargs):
+ '''check_template
+
+ :param kwargs:
+ :return:
+ '''
+ template_id = kwargs.get('template_id')
+ vm_name = kwargs.get('vm_name')
+ templates = self.get_templates()
+ id = None
+ for template in templates:
+ if template_id is not None:
+ urn = template.get('urn')
+ id = urn[-10:]
+ if id == template_id:
+ LOG.info(_("[VRM-CINDER] template exists [%s]"),
+ template_id)
+ return id
+
+ if vm_name is not None:
+ name = template.get('name')
+ if name == vm_name:
+ urn = template.get('urn')
+ id = urn[-10:]
+ LOG.info(_("[VRM-CINDER] vm is exists [%s]"), vm_name)
+ return id
+ return id
+
    def create_volume_from_template(self, **kwargs):
        '''Clone a template VM and return the urn of its (single) disk.

        Waits for the template to leave 'creating'; the template must
        have exactly one disk. The clone is deleted with isReserveDisks=1
        so its disk survives and becomes the new volume.

        :param kwargs: expects 'image_location' (template urn) and
            'image_id'; optional 'volume_sequence_num' (default 1) and
            'linked_clone'.
        :return: URN of the reserved disk volume.
        :raises: exception.ImageUnacceptable
        '''
        LOG.info(_("[VRM-CINDER] start create_volume_from_template()"))
        template = kwargs['image_location']
        kwargs['template_id'] = template[-10:]
        is_exist = self.check_template(**kwargs)
        if is_exist is None:
            msg = (_("[VRM-CINDER] no such template %s "),
                   kwargs.get('template_id'))
            raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                              reason=msg)

        # NOTE(review): unbounded wait — polls every 10s while the
        # template is still 'creating'; any other non-stopped state aborts.
        while True:
            template_vm = self.query_vm(vm_id=kwargs['template_id'])
            LOG.info(_("[VRM-CINDER] template_vm status is %s"),
                     template_vm.get('status'))
            if 'creating' == template_vm.get('status'):
                sleep(10)
            elif 'stopped' == template_vm.get('status'):
                break
            else:
                msg = (_("[VRM-CINDER] template isn't available %s "),
                       kwargs.get('template_id'))
                LOG.error(msg)
                raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                                  reason=msg)

        vm_config = template_vm['vmConfig']
        template_disks = vm_config['disks']
        if len(template_disks) != 1:
            msg = _("template must have one disk")
            LOG.error(msg)
            raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                              reason=msg)

        vm_urn = self.clone_vm(**kwargs)
        vm_id = vm_urn[-10:]
        vm = self.query_vm(vm_id=vm_id)
        vm_config = vm['vmConfig']
        disks = vm_config['disks']
        volume_urn = None
        if kwargs.get('volume_sequence_num') is None:
            kwargs['volume_sequence_num'] = 1
        for disk in disks:
            if int(disk['sequenceNum']) == int(kwargs['volume_sequence_num']):
                volume_urn = disk['volumeUrn']
                break
        if volume_urn is None:
            msg = (_("[VRM-CINDER] no available disk"))
            LOG.error(msg)
            self.delete_vm(vm_id=vm_id)
            raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
                                              reason=msg)
        try:
            # Delete the clone but keep its disks; the disk becomes the
            # volume handed back to cinder.
            self.delete_vm(vm_id=vm_id, isReserveDisks=1)
        except Exception as ex:
            LOG.error(_("delete vm(reserveDisk) is failed "))
            self.delete_vm(vm_id=vm_id)
            raise ex
        LOG.info(_("[VRM-CINDER] end ()"))
        return volume_urn
+
+ def create_linkclone_from_template(self, **kwargs):
+ '''create_linkclone_from_template
+
+ create_linkclone_volume
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start create_linkclone_from_template()"))
+ kwargs['linked_clone'] = True
+ return self.create_volume_from_template(**kwargs)
+
+ def create_linkClone_from_extend(self, **kwargs):
+ '''create_linkClone_from_extend
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.debug(_("[VRM-CINDER] start create_linkClone_volume()"))
+ vrm_template_lock.acquire()
+
+ try:
+ kwargs['linkClone'] = True
+ vm_name = 'cinder-vm-' + kwargs.get('image_id')
+ LOG.info(_("[VRM-CINDER] vm_name is %s"), vm_name)
+ kwargs['vm_name'] = vm_name
+ vm_id = self.check_template(**kwargs)
+ kwargs.pop('vm_name')
+ LOG.info(_("[VRM-CINDER] vm_id is %s"), vm_id)
+ image_type = kwargs.get('image_type')
+ if kwargs.get('volume_sequence_num') is None:
+ kwargs['volume_sequence_num'] = 1
+ if 1 < int(kwargs['volume_sequence_num']):
+ msg = (_("[VRM-CINDER] volume_sequence_num is %s "),
+ kwargs['volume_sequence_num'])
+ LOG.error(msg)
+ raise exception.ImageUnacceptable(image_id=kwargs['image_id'],
+ reason=msg)
+
+ if vm_id is None:
+ kwargs["is_template"] = True
+ if image_type == 'nfs':
+ vm_urn = self.import_vm_from_nfs(**kwargs)
+ kwargs.pop('linkClone')
+ LOG.info(_("[VRM-CINDER] import_vm_from_nfs vm_urn is %s"),
+ vm_urn)
+ vm_id = vm_urn[-10:]
+ kwargs['image_location'] = vm_id
+
+ elif image_type == 'uds':
+ vm_urn = self.import_vm_from_uds(**kwargs)
+ kwargs.pop('linkClone')
+ LOG.info(_("[VRM-CINDER] import_vm_from_uds vm_urn is %s"),
+ vm_urn)
+ vm_id = vm_urn[-10:]
+ kwargs['image_location'] = vm_id
+
+ else:
+ vm_urn = self.import_vm_from_glance(**kwargs)
+ kwargs.pop('linkClone')
+ LOG.info(
+ _("[VRM-CINDER] import_vm_from_glance vm_urn is %s"),
+ vm_urn)
+ vm_id = vm_urn[-10:]
+ kwargs['image_location'] = vm_id
+
+ else:
+ kwargs.pop('linkClone')
+ kwargs['image_location'] = vm_id
+ except Exception as ex:
+ vrm_template_lock.release()
+ raise ex
+
+ vrm_template_lock.release()
+ return self.create_linkclone_from_template(**kwargs)
+
    def get_templates(self, **kwargs):
        '''List all template VMs, paging through GET /vms?isTemplate=true.

        :param kwargs: optional 'scope' filter.
        :return: list of template VM dicts (possibly empty).
        '''

        LOG.info(_("[VRM-CINDER] start _get_templates()"))
        uri = '/vms'
        method = 'GET'
        path = self.site_uri + uri

        offset = 0
        templates = []
        while True:
            parames = {
                'limit': self.limit,
                'offset': offset,
                'scope': kwargs.get('scope'),
                'isTemplate': 'true'
            }
            appendix = self._joined_params(parames)
            new_url = self._generate_url(path, appendix)
            resp, body = self.vrmhttpclient.request(new_url, method)
            total = int(body.get('total') or 0)
            if total > 0:
                res = body.get('vms')
                templates += res
                offset += len(res)
                # Stop once all pages are fetched or a short page arrives.
                if offset >= total or len(templates) >= total or len(
                        res) < self.limit:
                    break
            else:
                break

        LOG.info(_("[VRM-CINDER] end ()"))
        return templates
+
    def create_vm(self, **kwargs):
        '''Create a temporary helper VM via POST /vms.

        :param kwargs: expects 'volume_id', 'cluster_urn' plus whatever
            _combine_empty_vmConfig()/_combine_os_options() consume.
        :return: decoded response body (contains 'uri'/'taskUri').
        '''

        LOG.info(_("[VRM-CINDER] start create_vm()"))
        uri = '/vms'
        method = 'POST'
        path = self.site_uri + uri
        new_url = self._generate_url(path)

        body = {
            'name': 'cinder-driver-temp-' + kwargs.get('volume_id'),
            'group': 'FSP',
            'description': 'cinder-driver-temp-vm',
            'autoBoot': 'false',
            'location': kwargs.get("cluster_urn"),
            'vmConfig': self._combine_empty_vmConfig(**kwargs),
            'osOptions': self._combine_os_options(**kwargs)
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))

        # Creation is asynchronous; wait for the FC task to finish.
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end create_vm()"))
        return body
+
+ def export_vm_to_glance(self, **kwargs):
+ '''export_vm_to_glance
+
+ :param kwargs:
+ :return:
+ '''
+ LOG.info(_("[VRM-CINDER] start export_vm_to_glance"))
+ uri = '/vms'
+ method = 'POST'
+ path = self.site_uri + uri + '/' + kwargs.get(
+ 'vm_id') + '/action/export'
+ new_url = self._generate_url(path)
+ if CONF.glance_host is None or str(CONF.glance_port) is None \
+ or FC_DRIVER_CONF.glance_server_ip is None:
+ raise exception.ParameterNotFound(param='glance_host or '
+ 'glance_port or glance_ip')
+
+ endpoint = CONF.glance_host + ":" + str(CONF.glance_port)
+ token = kwargs.get('auth_token')
+ serviceIp = FC_DRIVER_CONF.glance_server_ip
+
+ format = 'xml' if FC_DRIVER_CONF.export_version == 'v1.2' else 'ovf'
+ body = {
+ 'name': kwargs.get('image_id'),
+ 'format': format,
+ 'protocol': 'glance',
+ 'isOverwrite': 'false',
+ 'vmConfig': self._combine_vmConfig_4_export(**kwargs),
+ 'glanceConfig': {
+ 'imageID': kwargs.get('image_id'),
+ 'endPoint': endpoint,
+ 'serverIp': serviceIp,
+ 'token': token}
+ }
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ LOG.info(_("[VRM-CINDER] end export_vm_to_glance"))
+ return body.get('urn')
+
    def export_vm_to_uds(self, **kwargs):
        '''Export a VM's disk to a UDS/S3 bucket via /action/export.

        :param kwargs: expects 'vm_id', 'image_id'; 'project_id' is
            required when uds_bucket_type is 'wildcard'.
        :return: URN from the export response.
        :raises: exception.ParameterNotFound
        '''
        LOG.info(_("[VRM-CINDER] start export_vm_to_uds"))
        uri = '/vms'
        method = 'POST'
        path = self.site_uri + uri + '/' + kwargs.get(
            'vm_id') + '/action/export'
        new_url = self._generate_url(path)

        if FC_DRIVER_CONF.s3_store_access_key_for_cinder is None \
                or FC_DRIVER_CONF.s3_store_secret_key_for_cinder is None \
                or FC_DRIVER_CONF.uds_port is None or FC_DRIVER_CONF.uds_ip \
                is None \
                or FC_DRIVER_CONF.uds_bucket_name is None:
            LOG.error(_("[VRM-CINDER] some params is None, please check: "
                        "s3_store_access_key_for_cinder, "
                        "s3_store_secret_key_for_cinder, uds_port, uds_ip, "
                        "uds_bucket_name"))
            raise exception.ParameterNotFound(
                param='s3_store_access_key_for_cinder '
                      'or s3_store_secret_key_for_cinder or'
                      'uds_port or uds_serverIp or uds_bucket_name')

        uds_name = FC_DRIVER_CONF.s3_store_access_key_for_cinder
        uds_password = FC_DRIVER_CONF.s3_store_secret_key_for_cinder
        port = FC_DRIVER_CONF.uds_port
        serverIp = FC_DRIVER_CONF.uds_ip
        bucketName = FC_DRIVER_CONF.uds_bucket_name
        key = kwargs.get('image_id')
        # 'wildcard' buckets are per-tenant: append the project id.
        bucket_type = FC_DRIVER_CONF.uds_bucket_type
        if bucket_type is not None:
            if str(bucket_type) == 'wildcard':
                if kwargs.get('project_id') is None:
                    LOG.error(_("project_id is none "))
                    raise exception.ParameterNotFound(
                        param='project_id is none')
                else:
                    bucketName += kwargs.get('project_id')

        LOG.info(_("[VRM-CINDER] bucketName is %s"), bucketName)

        # v1.2 exports use the legacy xml format; newer versions use ovf.
        format = 'xml' if FC_DRIVER_CONF.export_version == 'v1.2' else 'ovf'
        body = {
            'name': kwargs.get('image_id'),
            'format': format,
            'protocol': 'uds',
            'isOverwrite': 'false',
            'vmConfig': self._combine_vmConfig_4_export(**kwargs),
            's3Config': {
                'serverIp': serverIp,
                'port': port,
                'accessKey': uds_name,
                'secretKey': uds_password,
                'bucketName': bucketName,
                'key': key}
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end export_vm_to_uds"))
        return body.get('urn')
+
    def export_vm_to_nfs(self, **kwargs):
        '''Export a VM's disk to an NFS share via /action/export.

        :param kwargs: expects 'vm_id', 'image_id', 'image_url' (nfs base
            path; the image id is appended to form the target url).
        :return: URN from the export response.
        '''

        LOG.info(_("[VRM-CINDER] start export_vm_to_nfs()"))
        uri = '/vms'
        method = 'POST'
        path = self.site_uri + uri + '/' + kwargs.get(
            'vm_id') + '/action/export'
        new_url = self._generate_url(path)

        # v1.2 exports use the legacy xml format; newer versions use ovf.
        format = 'xml' if FC_DRIVER_CONF.export_version == 'v1.2' else 'ovf'
        body = {
            'name': kwargs.get('image_id'),
            'url': kwargs.get('image_url') + '/' + kwargs.get('image_id'),
            'vmConfig': self._combine_vmConfig_4_export(**kwargs),
            'format': format,
            'protocol': 'nfs',
            'isOverwrite': 'false'
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))

        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end export_vm_to_nfs()"))
        return body.get('urn')
+
+ def get_volume_sequence_num(self, vm_body, volume_urn):
+ volume_sequence_num = 1
+ if vm_body is not None:
+ vm_uri = vm_body.get('uri')
+ volume_body = self.query_vm(vm_id=vm_uri[-10:])
+ if volume_body is not None:
+ for item in volume_body.get('vmConfig').get('disks'):
+ if item.get('volumeUrn') == volume_urn:
+ volume_sequence_num = item.get("sequenceNum")
+ else:
+ LOG.info(_("[VRM-CINDER] vm_body is null)"))
+ return volume_sequence_num
+
    def export_volume_to_image(self, **kwargs):
        '''Export a volume to an image, attaching it to a VM if needed.

        If the volume is already attached to a VM, export from that VM.
        Otherwise create a temporary VM, attach, export, detach and
        delete the VM again.

        :param kwargs: expects 'volume_urn', 'image_type' and the keys the
            export_vm_to_* helpers consume; optional 'shareable'.
        :return: the disk sequence number used for the export.
        '''
        LOG.info(_("[VRM-CINDER] start export_volume_to_image()"))
        vm_body = self.query_vm_volume(**kwargs)
        if vm_body is not None:
            # Volume is already attached: export directly from that VM.
            kwargs['volume_sequence_num'] = self.get_volume_sequence_num(
                vm_body,
                kwargs.get('volume_urn'))
            vm_uri = vm_body.get('uri')
            kwargs['vm_id'] = vm_uri[-10:]
            LOG.info(_("[VRM-CINDER] volume is already attached"))
            self.export_attached_volume_to_image(**kwargs)
            LOG.info(_("[VRM-CINDER] volume_sequence_num %s"),
                     kwargs['volume_sequence_num'])
            return kwargs['volume_sequence_num']

        vm_body = self.create_vm(**kwargs)
        vm_uri = vm_body.get('uri')

        kwargs['vm_id'] = vm_uri[-10:]

        # Shared volumes land at slot 2 on the temp VM, others at slot 1.
        if kwargs.get('shareable') == 'share':
            kwargs['volume_sequence_num'] = 2
        else:
            kwargs['volume_sequence_num'] = 1

        try:
            self.attach_vol_to_vm(**kwargs)
        except Exception as ex:
            LOG.error(_("[VRM-CINDER] attach volume is error "))
            self.delete_vm(**kwargs)
            raise ex
        try:
            self.export_attached_volume_to_image(**kwargs)
        except Exception as ex:
            # Undo the attach before removing the temp VM.
            LOG.error(_("[VRM-CINDER] export vm is error "))
            self.detach_vol_from_vm(**kwargs)
            self.delete_vm(**kwargs)
            raise ex

        self.detach_vol_from_vm(**kwargs)

        self.delete_vm(**kwargs)

        LOG.info(_("[VRM-CINDER] end export_volume_to_nfs()"))
        LOG.info(_("[VRM-CINDER] volume_sequence_num %s"),
                 kwargs['volume_sequence_num'])
        return kwargs['volume_sequence_num']
+
+ def export_attached_volume_to_image(self, **kwargs):
+ image_type = kwargs.get('image_type')
+ if image_type == 'nfs':
+ LOG.info(_('[VRM-CINDER] export_vm_to_nfs'))
+ self.export_vm_to_nfs(**kwargs)
+ elif image_type == 'uds':
+ LOG.info(_('VRM-CINDER] export_vm_to_uds'))
+ self.export_vm_to_uds(**kwargs)
+ else:
+ LOG.info(_('VRM-CINDER] export_vm_to_glance'))
+ self.export_vm_to_glance(**kwargs)
+
+ LOG.info(_("[VRM-CINDER] end export_attached_volume_to_image()"))
+
    def query_vm_volume(self, **kwargs):
        '''Find the VM a volume is attached to, via GET /vms?scope=<urn>.

        :param kwargs: expects 'volume_urn'.
        :return: the first matching VM dict, or None when unattached.
        '''

        LOG.info(_("[VRM-CINDER] start query_vm_volume()"))
        uri = '/vms'
        method = 'GET'
        path = self.site_uri + uri
        parames = {
            'scope': kwargs.get('volume_urn'),
            'detail': 0
        }
        appendix = self._joined_params(parames)
        new_url = self._generate_url(path, appendix)
        resp, body = self.vrmhttpclient.request(new_url, method)
        total = int(body.get('total') or 0)
        vm = None
        if total >= 1:
            # A volume can be attached to at most one VM here; take the first.
            vms = body.get('vms')
            vm = vms[0]
        else:
            LOG.info(_("[VRM-CINDER] find no vms()"))
        LOG.info(_("[VRM-CINDER] end query_vm_volume()"))
        return vm
+
    def migrate_vm_volume(self, **kwargs):
        '''Migrate a VM's volume to another datastore via /action/migratevol.

        :param kwargs: expects 'vm_id', 'volume_urn', 'dest_ds_urn';
            optional 'migrate_type' and 'speed'.
        :return: URN from the migrate response.
        '''

        LOG.info(_("[VRM-CINDER] start migrate_vm_volume()"))
        uri = '/vms'
        method = 'POST'
        path = self.site_uri + uri + '/' + kwargs.get(
            'vm_id') + '/action/migratevol'
        new_url = self._generate_url(path)

        body = {
            "disks": [
                {
                    "volumeUrn": kwargs.get('volume_urn'),
                    "datastoreUrn": kwargs.get('dest_ds_urn'),
                    'migrateType': kwargs.get('migrate_type')
                }
            ],
            "speed": kwargs.get('speed')
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))

        # Migration is asynchronous; block until the task completes.
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
        LOG.info(_("[VRM-CINDER] end migrate_vm_volume()"))
        return body.get('urn')
diff --git a/cinder/volume/drivers/huawei/fusioncompute/volume_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/volume_proxy.py
new file mode 100644
index 0000000..7869dc2
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/volume_proxy.py
@@ -0,0 +1,601 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+import json
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+from cinder.volume.drivers.huawei.vrm.task_proxy import TaskProxy
+
+from oslo_log import log as logging
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeProxy(BaseProxy):
    def __init__(self):
        # Reuse BaseProxy's connection/site setup; asynchronous FC tasks
        # are polled through a dedicated TaskProxy.
        super(VolumeProxy, self).__init__()
        self.task_proxy = TaskProxy()
+
+ def filter_not_none_dict(self, src_dic):
+ obj_dic = {}
+ for index_dic in src_dic:
+ if src_dic.get(index_dic) is not None:
+ obj_dic.update({index_dic: src_dic.get(index_dic)})
+ return obj_dic
+
+ def query_volume(self, **kwargs):
+ '''query_volume
+
+ 'query_volume': ('GET',
+ ('/volumes', kwargs.get(self.RESOURCE_URI),
+ None,
+ kwargs.get('id')),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')
+ },
+ {},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volume()"))
+ uri = '/volumes'
+ method = 'GET'
+ path = self.site_uri + uri + '/' + kwargs.get('id')
+
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ return body
+
+ def query_volume_replications(self, **kwargs):
+ '''query_volume_replications
+
+ 'query_volume_replications': ('GET',
+ ('/volumes/{volume_id}/action/replications',
+ kwargs.get(self.RESOURCE_URI), None,
+ kwargs.get('volume_id')),
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volume_replications()"))
+ uri = '/volumes'
+ method = 'GET'
+ path = self.site_uri + uri + '/' + kwargs.get(
+ 'volume_id') + '/action/replications'
+
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ return body
+
+ def list_volumes(self, **kwargs):
+ '''list_volumes
+
+ 'list_volumes': ('GET',
+ ('/volumes', kwargs.get(self.RESOURCE_URI),
+ None,
+ kwargs.get('id')),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')
+ },
+ {},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volumesnapshot()"))
+ uri = '/volumes/compatibility/discovery '
+ method = 'GET'
+ path = self.site_uri + uri
+
+ offset = 0
+ volumes = []
+ volumes_map = {} # use map to clean repeat
+ while True:
+ parames = {
+ 'limit': self.limit,
+ 'offset': offset,
+ 'scope': kwargs.get('scope'),
+ 'uuid': kwargs.get('uuid')
+ }
+ appendix = self._joined_params(parames)
+ new_url = self._generate_url(path, appendix)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ total = int(body.get('total') or 0)
+
+ if total > 0:
+ res = body.get('volumes')
+ for volume in res:
+ volumes_map[volume.get('uuid')] = volume
+ volumes = volumes_map.values()
+ volumes += res
+ offset += len(res)
+ if offset >= total or len(volumes) >= total or len(
+ res) < self.limit:
+ break
+ if offset > 5:
+ offset -= 5
+ else:
+ break
+
+ for index_volume in volumes:
+ if index_volume.get(
+ 'customProperties') is not None and index_volume.get(
+ 'customProperties').get('external_uuid') is not None:
+ index_volume["uuid"] = index_volume["customProperties"][
+ "external_uuid"]
+ return volumes
+
+ def list_volumes_extend(self, **kwargs):
+ '''list_volumes_extend
+
+ 'list_volumes_extend': ('GET',
+ ('/volumes', kwargs.get(self.RESOURCE_URI),
+ None,
+ kwargs.get('id')),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')
+ },
+ {},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volumesnapshot()"))
+ uri = '/volumes/extend'
+ method = 'GET'
+ path = self.site_uri + uri
+
+ offset = 0
+ volumes = []
+ volumes_map = {} # use map to clean repeat
+ while True:
+ parames = {
+ 'limit': self.limit,
+ 'offset': offset,
+ 'scope': kwargs.get('scope'),
+ 'uuid': kwargs.get('uuid')
+ }
+ appendix = self._joined_params(parames)
+ new_url = self._generate_url(path, appendix)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ total = int(body.get('total') or 0)
+ if total > 0:
+ res = body.get('volumes')
+ for volume in res:
+ volumes_map[volume.get('uuid')] = volume
+ volumes = volumes_map.values()
+ volumes += res
+ offset += len(res)
+ if offset >= total or len(volumes) >= total or len(
+ res) < self.limit:
+ break
+ if offset > 5:
+ offset -= 5
+ else:
+ break
+
+ for index_volume in volumes:
+ if index_volume.get(
+ 'customProperties') is not None and index_volume.get(
+ 'customProperties').get('external_uuid') is not None:
+ index_volume["uuid"] = index_volume["customProperties"][
+ "external_uuid"]
+
+ return volumes
+
    def create_volume(self, **kwargs):
        '''Create a volume via POST /volumes and return the raw response.

        :param kwargs: expects 'name', 'size' (GB), 'ds_urn', 'uuid',
            'is_thin', 'type', 'independent'; optional 'support_pvscsi'.
        :return: decoded response body (caller waits on 'taskUri').
        '''
        LOG.info(_("[VRM-CINDER] start create_volume()"))
        uri = '/volumes'
        method = 'POST'
        path = self.site_uri + uri
        new_url = self._generate_url(path)
        # The cinder uuid is mirrored into customProperties.external_uuid
        # so list_volumes() can map FC volumes back to cinder ids.
        body = {
            'name': kwargs.get('name'),
            'quantityGB': kwargs.get('size'),
            'datastoreUrn': kwargs.get('ds_urn'),
            'uuid': kwargs.get('uuid'),
            'isThin': kwargs.get('is_thin'),
            'type': kwargs.get('type'),
            'indepDisk': kwargs.get('independent'),
            'customProperties': {"external_uuid": kwargs.get('uuid')}
        }
        # Only send pvscsiSupport when explicitly requested.
        support_pvscsi = kwargs.get('support_pvscsi', None)
        if support_pvscsi is not None:
            body.update({'pvscsiSupport': support_pvscsi})

        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        return body
+
    def delete_volume(self, **kwargs):
        '''Delete a volume by resource uri, tolerating known error codes.

        Error 10420004 (volume already gone) is swallowed; "busy" codes
        are mapped to cinder_exception.VolumeIsBusy so the volume manager
        can retry the deletion later.

        :param kwargs: expects 'volume_uri'.
        :raises: cinder_exception.VolumeIsBusy,
            driver_exception.ClientException
        '''
        LOG.info(_("[VRM-CINDER] start delete_volume()"))
        method = 'DELETE'
        path = kwargs.get('volume_uri')
        new_url = self._generate_url(path)
        # FC error codes meaning "volume temporarily in use".
        error_busy = ['10300057', '10420005', '10420009', '10410154',
                      '10420137', '10420138', '10430058']
        try:
            resp, body = self.vrmhttpclient.request(new_url, method)
            task_uri = body.get('taskUri')
            self.task_proxy.wait_task(task_uri=task_uri)
        except driver_exception.ClientException as ex:
            LOG.info(_("[VRM-CINDER] delete volume (%s)"), ex.errorCode)
            if ex.errorCode == "10420004":
                # Volume does not exist: treat deletion as already done.
                return
            elif ex.errorCode in error_busy:
                LOG.info(_(
                    "[VRM-CINDER] volume status conflicts, try delete later."))
                raise cinder_exception.VolumeIsBusy(message=ex.errorCode)
            else:
                raise ex
+
    def clone_volume(self, **kwargs):
        '''Copy one volume onto another via /volumes/<src>/action/copyVol.

        :param kwargs: expects 'src_volume_id' and 'dest_volume_urn'
            (destination must already exist).
        '''
        LOG.info(_("[VRM-CINDER] start clone_volume()"))
        uri = '/volumes'
        method = 'POST'
        path = self.site_uri + uri + '/' + kwargs.get(
            'src_volume_id') + '/action/copyVol'
        body = {'dstVolUrn': kwargs.get('dest_volume_urn')}
        new_url = self._generate_url(path)
        # 'body' is reused: request payload in, response payload out.
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
+
    def _copy_nfs_image_to_volume(self, **kwargs):
        '''Copy an NFS-hosted image onto an existing volume.

        Uses POST /volumes/imagetovolume with needCreateVolume=False, so
        the target volume must already exist.

        :param kwargs: expects 'volume_size', 'volume_urn', 'image_id',
            'image_location', 'cluster_urn'.
        '''
        LOG.info(_("[VRM-CINDER] start copy_image_to_volume()"))
        uri = '/volumes/imagetovolume'
        method = 'POST'
        path = self.site_uri + uri
        new_url = self._generate_url(path)
        body = {
            'volumePara': {
                'quantityGB': kwargs.get('volume_size'),
                'urn': kwargs.get('volume_urn')
            },
            'imagePara': {
                'id': kwargs.get('image_id'),
                'url': kwargs.get('image_location')
            },
            'location': kwargs.get('cluster_urn'),
            # The volume already exists; FC must not create a new one.
            'needCreateVolume': False
        }
        resp, body = self.vrmhttpclient.request(new_url, method,
                                                body=json.dumps(body))
        task_uri = body.get('taskUri')
        self.task_proxy.wait_task(task_uri=task_uri)
+
+ def _copy_volume_to_image(self, **kwargs):
+ '''_copy_volume_to_image
+
+ 'copy_volume_to_image': ('POST',
+ ('/volumes/volumetoimage', None, None, None),
+ {},
+ {
+ 'volumePara': {'urn':
+ kwargs.get('volume_urn'),
+ 'quantityGB':
+ kwargs.get('volume_size')},
+ 'imagePara': {
+ 'id': kwargs.get(
+ 'image_id'),
+ 'url': kwargs.get(
+ 'image_url')}
+ },
+ True),
+ '''
+
+ LOG.info(_("[VRM-CINDER] start stop_vm()"))
+ uri = '/volumes/volumetoimage'
+ method = 'POST'
+ path = self.site_uri + uri
+ new_url = self._generate_url(path)
+
+ body = {
+ 'volumePara': {
+ 'urn': kwargs.get('volume_urn'),
+ 'quantityGB': kwargs.get('volume_size')},
+ 'imagePara': {
+ 'id': kwargs.get('image_id'),
+ 'url': kwargs.get('image_url')}
+ }
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+
+ def manage_existing(self, **kwargs):
+ '''manage_existing
+
+ 'manage_existing': ('POST',
+ ('/volumes', None, None, None),
+ {},
+ {'name': kwargs.get('name'),
+ 'quantityGB': kwargs.get('quantityGB'),
+ 'datastoreUrn': kwargs.get('datastoreUrn'),
+ 'uuid': kwargs.get('uuid'),
+ 'type': kwargs.get('type'),
+ 'indepDisk': kwargs.get('indepDisk'),
+ 'persistentDisk':
+ kwargs.get('persistentDisk'),
+ 'volumeId': kwargs.get('volumeId'),
+ 'snapshotUuid': kwargs.get('snapshotUuid'),
+ 'imageUrl': kwargs.get('imageUrl'),
+ },
+ True),
+ '''
+        LOG.info(_("[VRM-CINDER] start manage_existing()"))
+ uri = '/volumes/registevol'
+ method = 'POST'
+ path = self.site_uri + uri
+ new_url = self._generate_url(path)
+ body = {
+ 'name': kwargs.get('name'),
+ 'quantityGB': kwargs.get('quantityGB'),
+ 'volInfoUrl': kwargs.get('volInfoUrl'),
+ 'uuid': kwargs.get('uuid'),
+ 'customProperties': {"external_uuid": kwargs.get('uuid')},
+ 'type': kwargs.get('type'),
+ 'maxReadBytes': kwargs.get('maxReadBytes'),
+ 'maxWriteBytes': kwargs.get('maxWriteBytes'),
+ 'maxReadRequest': kwargs.get('maxReadRequest'),
+ 'maxWriteRequest': kwargs.get('maxWriteRequest')}
+
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ return body
+
+ def unmanage(self, **kwargs):
+ '''unmanage
+
+ 'unmanage': ('DELETE',
+ ('/volumes?isOnlyDelDB=1',
+ kwargs.get(self.RESOURCE_URI), None, None),
+ {},
+ {},
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start unmanage()"))
+ method = 'DELETE'
+ path = kwargs.get('volume_uri') + '?isOnlyDelDB=1'
+ new_url = self._generate_url(path)
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ except driver_exception.ClientException as ex:
+ LOG.info(_("[VRM-CINDER] unmanage volume (%s)"), ex.errorCode)
+ if ex.errorCode == "10420004":
+ return
+ else:
+ raise ex
+
+ def migrate_volume(self, **kwargs):
+ '''migrate_volume
+
+ Post //action/migratevol HTTP/1.1
+ Host https://:
+ Accept application/json;version=; charset=UTF-8
+ X-Auth-Token:
+ {
+ 'datastoreUrn':string,
+ 'speed': integer
+ }
+ '''
+ LOG.info(_("[VRM-CINDER] start migrate_volume()"))
+ uri = '/volumes'
+ method = 'POST'
+
+ path = self.site_uri + uri + '/' + kwargs.get(
+ 'volume_id') + '/action/migratevol'
+ body = {'datastoreUrn': kwargs.get('dest_ds_urn'),
+ 'speed': kwargs.get('speed'),
+ 'migrateType': kwargs.get('migrate_type')}
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+
+ def modify_volume(self, **kwargs):
+ LOG.info(_("[VRM-CINDER] start modify_volume()"))
+ uri = '/volumes'
+ method = 'PUT'
+ path = self.site_uri + uri + '/' + kwargs.get('volume_id')
+ body = {}
+ if kwargs.get('type') is not None:
+ body.update({'type': kwargs.get('type')})
+ if kwargs.get('name') is not None:
+ body.update({'name': kwargs.get('name')})
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ if resp.status_code not in (200, 204):
+ raise driver_exception.ClientException(101)
+
+ def extend_volume(self, **kwargs):
+ '''extend_volume
+
+ 'extend_volume': ('POST',
+ (kwargs.get('volume_uri'),'/action/expandVol',
+ None, None),
+ {},
+ {},
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start extend_volume()"))
+ method = 'POST'
+ body = {'size': kwargs.get('size')}
+ volume_uri = kwargs.get('volume_uri')
+
+ path = volume_uri + '/action/expandVol'
+ new_url = self._generate_url(path)
+
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+
+ task_uri = body.get('taskUri')
+
+ self.task_proxy.wait_task(task_uri=task_uri)
+ except driver_exception.ClientException as ex:
+ LOG.info(_("[VRM-CINDER] extend volume (%s)"), ex.errorCode)
+ raise ex
+ except Exception as ex:
+ LOG.info(_("[VRM-CINDER] extend volume (%s)"), ex)
+ raise ex
+
+ def wait_task(self, **kwargs):
+ task_uri = kwargs.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+
+ def query_volume_by_custom_properties(self, **kwargs):
+ '''query_volume_by_custom_properties
+
+ 'query_volume_by_custom_properties': ('POST',
+ ('/volumes/queryby/custom-properties', None, None,
+ None),
+ {},
+ {"condition": {"name": "external_uuid",
+ "value": "93DF26A9-2D3B-43A6-A7D8-CF5E9D0DC17C"
+ },
+ "offset": 0,
+                    "limit": 10
+ },
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volume_by_custom_properties()"))
+ uri = '/volumes/queryby/custom-properties'
+ method = 'POST'
+ path = self.site_uri + uri
+ new_url = self._generate_url(path)
+ body = {
+ 'condition': {
+ 'name': 'external_uuid',
+ 'value': kwargs.get('external_uuid')
+ }
+ }
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ volumes = []
+ total = int(body.get('total') or 0)
+ if total > 0:
+ res = body.get('volumes')
+ volumes += res
+ return volumes[0]
+ return None
+
+ def update_custom_properties(self, **kwargs):
+ '''update_custom_properties
+
+ 'update_by_custom_properties': ('POST',
+ ('/volumes/properties/custom', None, None, None),
+ {},
+        {"volumes": [{
+            'volumeUrn': kwargs.get('volumeUrn'),
+            'customProperties': {
+                'external_uuid': kwargs.get('external_uuid')
+            }}]
+ },
+ True),
+ '''
+        LOG.info(_("[VRM-CINDER] start update_custom_properties()"))
+ uri = '/volumes/properties/custom'
+ method = 'POST'
+ path = self.site_uri + uri
+ new_url = self._generate_url(path)
+ body = {
+ 'volumes': [
+ {
+ 'volumeUrn': kwargs.get('volume_urn'),
+ 'customProperties': {
+ 'external_uuid': kwargs.get('external_uuid')
+ }
+ }
+ ]
+ }
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ return body
diff --git a/cinder/volume/drivers/huawei/fusioncompute/volume_snapshot_proxy.py b/cinder/volume/drivers/huawei/fusioncompute/volume_snapshot_proxy.py
new file mode 100644
index 0000000..9858610
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/volume_snapshot_proxy.py
@@ -0,0 +1,308 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+[VRM DRIVER] VRM CLIENT.
+
+"""
+import json
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.base_proxy import BaseProxy
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+from cinder.volume.drivers.huawei.vrm.task_proxy import TaskProxy
+from cinder.volume.drivers.huawei.vrm.utils import Delete_Snapshot_Code
+
+
+CONF = cfg.CONF
+try:
+ from eventlet import sleep
+except ImportError:
+ from time import sleep
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+LOG = logging.getLogger(__name__)
+
+
+class VolumeSnapshotProxy(BaseProxy):
+ def __init__(self, *args, **kwargs):
+ super(VolumeSnapshotProxy, self).__init__()
+ LOG.info(_("[VRM-CINDER] start __init__()"))
+ self.task_proxy = TaskProxy()
+ self.create_retry = ['10000009', '10430056', '10430050', '10300421',
+ '10430057', '10300318', '10300162', '10300026']
+ self.delete_retry = Delete_Snapshot_Code
+ self.number = CONF.vrm_vol_snapshot_retries
+ self.sleep_time = CONF.vrm_snapshot_sleeptime
+ if not self.number:
+ LOG.error('conf number is None, use 3')
+ self.number = 3
+
+ if not self.sleep_time:
+ LOG.error('conf sleeptime is None, use 300')
+ self.sleep_time = 300
+
+ def query_volumesnapshot(self, **kwargs):
+ '''query_volumesnapshot
+
+ 'list_volumesnapshot': ('GET',
+ ('/volumesnapshots', None, kwargs.get('uuid'),
+ None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')
+ },
+ {},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start query_volumesnapshot()"))
+ uri = '/volumesnapshots'
+ method = 'GET'
+ path = self.site_uri + uri + '/' + kwargs.get('uuid')
+ new_url = self._generate_url(path)
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ except driver_exception.ClientException as ex:
+ LOG.info(_("[VRM-CINDER] query snapshot (%s)"), ex.errorCode)
+ if ex.errorCode == "10430051":
+ return None
+ else:
+ raise ex
+
+ '''
+ error_code = body.get('errorCode')
+ if error_code != None:
+ if '10430010' == error_code:
+ LOG.info(_("[VRM-CINDER] snapshot not exist"))
+ return None
+ '''
+
+ return body
+
+ def list_snapshot(self, **kwargs):
+ '''list_snapshot
+
+ 'list_volumesnapshot': ('GET',
+ ('/volumesnapshots', None, kwargs.get('uuid'),
+ None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')
+ },
+ {},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start list_snapshot()"))
+ uri = '/volumesnapshots/queryVolumeSnapshots'
+ method = 'GET'
+ path = self.site_uri + uri
+ body = None
+ offset = 0
+
+ snapshots = []
+ while True:
+ parames = {
+ 'limit': self.limit,
+ 'offset': offset}
+ appendix = self._joined_params(parames)
+ new_url = self._generate_url(path, appendix)
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ total = int(body.get('total') or 0)
+ if total > 0:
+ res = body.get('snapshots')
+ snapshots += res
+ offset += len(res)
+ if offset >= total or len(snapshots) >= total or len(
+ res) < self.limit:
+ break
+ else:
+ break
+
+ return snapshots
+
+ def create_volumesnapshot(self, **kwargs):
+ '''create_volumesnapshot
+
+ 'create_volumesnapshot': ('POST',
+ ('/volumesnapshots', None, None, None),
+ {},
+ {'volumeUrn': kwargs.get('vol_urn'),
+ 'snapshotUuid': kwargs.get('uuid'),
+ },
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start create_volumesnapshot()"))
+ uri = '/volumesnapshots'
+ method = 'POST'
+ path = self.site_uri + uri
+ body = {
+ 'volumeUrn': kwargs.get('vol_urn'),
+ 'snapshotUuid': kwargs.get('snapshot_uuid')
+ }
+ enable_active = kwargs.get('enable_active', None)
+ if enable_active is not None:
+ body.update({'enableActive': enable_active})
+ new_url = self._generate_url(path)
+
+ number = self.number
+ sleep_time = self.sleep_time
+ while(number >= 0):
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ if task_uri is not None:
+ self.task_proxy.wait_task(task_uri=task_uri)
+ return body
+ except driver_exception.ClientException as ex:
+ LOG.info(_("[VRM-CINDER] create volumesnapshot (%s)"),
+ ex.errorCode)
+ if ex.errorCode not in self.create_retry:
+ raise ex
+ LOG.debug(
+ '[VRM-CINDER] The errorcode is in retry list:%d' % number)
+ number -= 1
+ if number < 0:
+ raise ex
+ sleep(sleep_time)
+
+ def active_snapshots(self, **kwargs):
+ '''active_snapshots
+
+ 'active_snapshots': ('POST',
+ ('/volumesnapshots/enableSnapshots',
+ None, None, None),
+ {},
+ {'snapshotList': kwargs.get('snapshot_urns')},
+ False),
+ '''
+ LOG.info(_("[VRM-CINDER] start active_snapshots()"))
+ uri = '/volumesnapshots/enableSnapshots'
+ method = 'POST'
+ path = self.site_uri + uri
+ body = {
+ 'snapshotUuidList': kwargs.get('snapshot_uuids')
+ }
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+ task_uri = body.get('taskUri')
+ if task_uri is not None:
+ self.task_proxy.wait_task(task_uri=task_uri)
+ return body
+
+ def delete_volumesnapshot(self, **kwargs):
+ '''delete_volumesnapshot
+
+ 'delete_volumesnapshot': ('DELETE',
+ ('/volumesnapshots', None, None,
+ kwargs.get('id')),
+ {},
+ {
+ 'snapshotUuid': kwargs.get('snapshotUuid'),
+ },
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start delete_volumesnapshot()"))
+ uri = '/volumesnapshots'
+ method = 'DELETE'
+ path = self.site_uri + uri + '/' + kwargs.get('id')
+ body = {
+ 'volumeUrn': kwargs.get('vol_urn'),
+ 'snapshotUuid': kwargs.get('snapshot_uuid')}
+ new_url = self._generate_url(path)
+
+ number = self.number
+ sleep_time = self.sleep_time
+ while(number >= 0):
+ try:
+ resp, body = self.vrmhttpclient.request(new_url, method)
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ break
+ except driver_exception.ClientException as ex:
+ LOG.info(_("[VRM-CINDER] delete volumesnapshot (%s)"),
+ ex.errorCode)
+
+ normal_errorcode = ['10300026', '10300057', '10430052',
+ '10430054', '10430056', '10300421',
+ '10300318', '10300162', '10300153',
+ '10300170', '10000009', '10300421',
+ '10420138']
+ if ex.errorCode in normal_errorcode:
+ LOG.error(_(
+ "[VRM-CINDER] snapshot status conflicts, "
+ "try delete later."))
+ raise cinder_exception.SnapshotIsBusy(
+ snapshot_name=kwargs.get('snapshot_uuid'))
+
+ if ex.errorCode not in self.delete_retry:
+ raise ex
+ LOG.info(
+ '[VRM-CINDER] The errorcode is in retry list, number:%d' %
+ number)
+ number -= 1
+ if number < 0:
+ raise ex
+ sleep(sleep_time)
+
+ def create_volume_from_snapshot(self, **kwargs):
+ '''create_volume_from_snapshot
+
+ 'createvolumefromsnapshot': ('POST',
+ ('/volumesnapshots', None, "createvol",
+ None),
+ {},
+ {'snapshotUuid': kwargs.get('uuid'),
+ 'volumeName': kwargs.get('name'),
+ 'volumeType': normal/share
+ 'volumeUuid': uuid
+ 'snapshotVolumeType': 0/1
+ },
+ True),
+ '''
+ LOG.info(_("[VRM-CINDER] start createvolumefromsnapshot()"))
+ uri = '/volumesnapshots/createvol'
+ method = 'POST'
+ path = self.site_uri + uri
+ snapshotVolumeType = 0
+ if str(kwargs.get('full_clone')) == '0':
+ snapshotVolumeType = 1
+ body = {
+ 'snapshotUuid': kwargs.get('snapshot_uuid'),
+ 'volumeName': kwargs.get('volume_name'),
+ 'volumeType': kwargs.get('type'),
+ 'volumeUuid': kwargs.get('volume_uuid'),
+ 'snapshotVolumeType': snapshotVolumeType,
+ }
+ if kwargs.get('volume_size') is not None:
+ body.update({'volumeSize': kwargs.get('volume_size')})
+ new_url = self._generate_url(path)
+ resp, body = self.vrmhttpclient.request(new_url, method,
+ body=json.dumps(body))
+
+ task_uri = body.get('taskUri')
+ self.task_proxy.wait_task(task_uri=task_uri)
+ return body
diff --git a/cinder/volume/drivers/huawei/fusioncompute/vrm_commands.py b/cinder/volume/drivers/huawei/fusioncompute/vrm_commands.py
new file mode 100644
index 0000000..e4b4ef5
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/vrm_commands.py
@@ -0,0 +1,441 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urlparse
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder import exception as cinder_exception
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.conf import FC_DRIVER_CONF
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+
+TASK_WAITING = 'waiting'
+TASK_RUNNING = 'running'
+TASK_SUCCESS = 'success'
+TASK_FAILED = 'failed'
+TASK_CANCELLING = 'cancelling'
+TASK_UNKNOWN = 'unknown'
+
+CONF = cfg.CONF
+
+
+LOG = logging.getLogger(__name__)
+
+
+class VRM_COMMANDS(object):
+ USER_AGENT = 'VRM-HTTP-Client for OpenStack'
+ RESOURCE_URI = 'uri'
+ TASK_URI = 'taskUri'
+ BASIC_URI = '/service'
+ glanceServer_host = ''
+ glanceServer_port = ''
+ version = CONF.vrm_version
+ site_uri = ''
+
+ def __init__(self):
+ self.site_urn = None
+ self.site_uri = None
+
+ glanceServer_Info = str(CONF.glance_api_servers).split(':')
+ LOG.info("glance_api_servers is [%s]", CONF.glance_api_servers)
+ LOG.info("sxmatch glanceServer_Info is %s", glanceServer_Info)
+ self.glanceServer_host =\
+ glanceServer_Info[0] + ":" + glanceServer_Info[1]
+ self.glanceServer_host = self.glanceServer_host.replace("['", '')
+ LOG.info("sxmatch glanceServer_host is %s", self.glanceServer_host)
+ self.glanceServer_port = glanceServer_Info[1].replace("']", '')
+ LOG.info("sxmatch glanceServer_port is %s", self.glanceServer_port)
+
+ """Generate command to be sent to VRM."""
+
+ def _joined_params(self, params):
+ param_str = []
+ for k, v in params.items():
+ if (k is None) or (v is None) or len(k) == 0:
+ continue
+ if k == 'scope' and v == self.site_urn:
+ continue
+ param_str.append("%s=%s" % (k, str(v)))
+ return '&'.join(param_str)
+
+ def _joined_body(self, params):
+ param_str = []
+ for k, v in params.items():
+ if k is None or v is None or \
+ len(k) == 0:
+ continue
+
+ if type(v) in [int]:
+ param_str.append('"%s":%s' % (k, str(v)))
+ elif type(v) in [str, unicode]:
+ param_str.append('"%s":"%s"' % (k, str(v)))
+ elif type(v) in [bool]:
+ param_str.append('"%s":%s' % (k, str(v).lower()))
+ elif type(v) in [dict]:
+ param_str1 = json.dumps(v)
+ param_str.append('"%s":%s' % (k, param_str1))
+ else:
+ pass
+
+ if len(param_str) > 0:
+ return '{' + ','.join(param_str) + '}'
+ else:
+ return None
+
+ def _combine_vmConfig(self, **kwargs):
+ cpu_quantity = 1
+ mem_quantityMB = 1024
+ datastoreUrn = CONF.vrm_sm_datastoreurn
+ disk_quantityGB = 10
+ image_size = 10
+
+ if image_size > disk_quantityGB:
+ LOG.error(_("image is larger than sys-vol."))
+ raise cinder_exception.ImageTooLarge
+
+ cpu = {'quantity': cpu_quantity}
+ memory = {'quantityMB': mem_quantityMB}
+ disks = [
+ {
+ 'datastoreUrn': datastoreUrn,
+ 'quantityGB': disk_quantityGB,
+ 'volType': 0,
+ 'sequenceNum': 1,
+ }
+ ]
+ properties = {
+ 'isEnableHa': True,
+ 'reoverByHost': False,
+ 'isEnableFt': False
+ }
+
+ vmConfigBody = {
+ 'cpu': cpu,
+ 'memory': memory,
+ 'disks': disks,
+ 'properties': properties,
+ }
+ return vmConfigBody
+
+ def _combine_os_options(self, **kwargs):
+ osOptions = {
+ 'osType': 'Windows',
+ 'osVersion': 26
+ }
+ return osOptions
+
+ def init_site(self, uri, urn):
+ self.site_uri = uri
+ self.site_urn = urn
+ return
+
+ def _generate_url(self, path, query=None, frag=None):
+ LOG.info(_("[BRM-DRIVER] call _generate_url() "))
+ if CONF.vrm_ssl:
+ scheme = 'https'
+ else:
+ scheme = 'http'
+ fc_ip = FC_DRIVER_CONF.fc_ip
+ netloc = str(fc_ip) + ':' + str(CONF.vrm_port)
+ if path.startswith(self.BASIC_URI):
+ url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
+ else:
+ url = urlparse.urlunsplit(
+ (scheme, netloc, self.BASIC_URI + str(path), query, frag))
+
+ return url
+
+ def generate_vrm_cmd(self, cmd, **kwargs):
+ COMMANDS = {
+ 'v5.1': {
+ 'list_tasks': ('GET',
+ ('/tasks', kwargs.get(self.RESOURCE_URI), None,
+ None),
+ {},
+ {},
+ False),
+ 'list_hosts': ('GET',
+ ('/hosts', kwargs.get(self.RESOURCE_URI), None,
+ None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')},
+ {},
+ False),
+ 'list_datastores': ('GET',
+ ('/datastores',
+ kwargs.get(self.RESOURCE_URI), None,
+ None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')},
+ {},
+ False),
+ 'list_volumes': ('GET',
+ ('/volumes', kwargs.get(self.RESOURCE_URI),
+ None, kwargs.get('id')),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')},
+ {},
+ False),
+ 'create_volume': ('POST',
+ ('/volumes', None, None, None),
+ {},
+ {'name': kwargs.get('name'),
+ 'quantityGB': kwargs.get('quantityGB'),
+ 'datastoreUrn': kwargs.get('datastoreUrn'),
+ 'uuid': kwargs.get('uuid'),
+ 'isThin': kwargs.get('isThin'),
+ 'type': kwargs.get('type'),
+ 'indepDisk': kwargs.get('indepDisk'),
+ 'persistentDisk': kwargs.get(
+ 'persistentDisk'),
+ 'volumeId': kwargs.get('volumeId')},
+ True),
+ 'delete_volume': ('DELETE',
+ ('/volumes', kwargs.get(self.RESOURCE_URI),
+ None, None),
+ {},
+ {},
+ True),
+
+ 'list_volumesnapshot': ('GET',
+ ('/volumesnapshots', None,
+ kwargs.get('uuid'), None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope')},
+ {},
+ False),
+ 'create_volumesnapshot': ('POST',
+ ('/volumesnapshots', None, None,
+ None),
+ {},
+ {'volumeUrn': kwargs.get('vol_urn'),
+ 'snapshotUuid': kwargs.get('uuid')},
+ False),
+ 'delete_volumesnapshot': ('DELETE',
+ ('/volumesnapshots', None, None,
+ kwargs.get('id')),
+ {},
+ {
+ 'snapshotUuid': kwargs.get(
+ 'snapshotUuid')
+ },
+ True),
+ 'createvolumefromsnapshot': ('POST',
+ ('/volumesnapshots', None,
+ "createvol", None),
+ {},
+ {'snapshotUuid': kwargs.get(
+ 'uuid'),
+ 'volumeName':
+ kwargs.get('name')},
+ True),
+ 'clone_volume': ('POST',
+ ('/volumes', None, kwargs.get('src_name'),
+ 'action/copyVol'),
+ {},
+ {'destinationVolumeID': kwargs.get(
+ 'dest_name')},
+ True),
+ 'copy_image_to_volume': ('POST',
+ ('/volumes/imagetovolume', None, None,
+ None),
+ {},
+ {
+ 'volumePara': {
+ 'quantityGB': kwargs.get(
+ 'volume_size'),
+ 'urn': kwargs.get(
+ 'volume_urn')
+ },
+ 'imagePara': {
+ 'id': kwargs.get('image_id'),
+ 'url': kwargs.get(
+ 'image_location')
+ },
+ 'location': kwargs.get(
+ 'host_urn'),
+ 'needCreateVolume': False
+ },
+ True),
+ 'copy_volume_to_image': ('POST',
+ ('/volumes/volumetoimage', None, None,
+ None),
+ {},
+ {
+ 'volumePara': {'urn': kwargs.get(
+ 'volume_urn'),
+ 'quantityGB': kwargs.get(
+ 'volume_size')},
+ 'imagePara': {
+ 'id': kwargs.get('image_id'),
+ 'url': kwargs.get(
+ 'image_url')}
+ },
+ True),
+ 'import_vm_from_image': ('POST',
+ ('/vms/action/import', None, None,
+ None),
+ {},
+ dict({
+ 'name': 'name',
+ 'location': kwargs.get(
+ 'host_urn'),
+ 'autoBoot': 'false',
+ 'url': kwargs.get('url'),
+ 'protocol': 'nfs',
+ 'vmConfig': {
+ 'cpu': {
+ 'quantity': 1
+ },
+ 'memory': {
+ 'quantityMB': 1024
+ },
+ 'disks': [
+ {
+ 'pciType': 'IDE',
+ 'datastoreUrn':
+ kwargs.get(
+ 'ds_urn'),
+ 'quantityGB':
+ kwargs.get(
+ 'vol_size'),
+ 'volType': 0,
+ 'sequenceNum': 1},
+ ]
+ },
+ 'osOptions': {
+ 'osType': 'Windows',
+ 'osVersion': 32
+ }
+ }),
+ True),
+ 'detach_vol_from_vm': ('POST',
+ ('/vms', None, kwargs.get('vm_id'),
+ 'action/detachvol'),
+ {},
+ {'volUrn': kwargs.get('volUrn')},
+ True),
+ 'stop_vm': ('POST',
+ ('/vms', None, kwargs.get('vm_id'), 'action/stop'),
+ {},
+ {'mode': kwargs.get('mode')},
+ True),
+ 'delete_vm': ('DELETE',
+ ('/vms', None, kwargs.get('vm_id'), None),
+ {},
+ {},
+ True),
+ 'query_vm': ('GET',
+ ('/vms', None, kwargs.get('vm_id'), None),
+ {},
+ {},
+ False),
+ 'list_templates': ('GET',
+ ('/vms', None, None, None),
+ {'limit': kwargs.get('limit'),
+ 'offset': kwargs.get('offset'),
+ 'scope': kwargs.get('scope'),
+ 'isTemplate': 'true'},
+ {},
+ False),
+ 'clone_vm': ('POST',
+ ('/vms', None, kwargs.get('template_id'),
+ 'action/clone'),
+ {},
+ dict({
+ "name": "cinder-plugin-temp-vm",
+ "description": "cinder-plugin-temp-vm",
+ "isLinkClone": kwargs.get('linked_clone'),
+ 'location': kwargs.get('host_urn'),
+ 'autoBoot': 'false',
+ 'vmConfig':
+ {
+ 'cpu': {
+ 'quantity': 2
+ },
+ 'memory':
+ {
+ 'quantityMB': 1024
+ },
+ 'disks':
+ [{
+ 'pciType': 'IDE',
+ 'datastoreUrn': kwargs.get('ds_urn'),
+ 'quantityGB': kwargs.get(
+ 'volume_size'),
+ 'volType': 0,
+ 'sequenceNum': 1,
+ 'isThin': kwargs.get('is_thin')
+ }]
+ },
+ }),
+ True),
+ },
+
+ 'v2.0': {}
+ }
+
+ path = query = body = None
+
+ LOG.info("[BRM-DRIVER] version is [%s]", self.version)
+ if self.version not in COMMANDS.keys():
+ raise driver_exception.UnsupportedVersion()
+ else:
+ commands = COMMANDS[self.version]
+
+ if cmd not in commands.keys():
+ raise driver_exception.UnsupportedCommand()
+ else:
+ (method, pathparams, queryparams, bodyparams, hastask) = commands[
+ cmd]
+
+ resource, resource_uri, tag1, tag2 = pathparams
+ if resource_uri:
+ path = resource_uri
+ LOG.info(_("[VRM-CINDER] [%s]"), path)
+ else:
+ path = self.site_uri + resource
+ LOG.info(_("[VRM-CINDER] [%s]"), path)
+ if tag1:
+ path += ('/' + str(tag1))
+ LOG.info(_("[VRM-CINDER] [%s]"), path)
+ if tag2:
+ path += ('/' + str(tag2))
+ LOG.info(_("[VRM-CINDER] [%s]"), path)
+
+ if method == 'GET':
+ query = self._joined_params(queryparams)
+ elif method == 'DELETE':
+ query = self._joined_params(queryparams)
+ elif method == 'POST':
+ query = self._joined_params(queryparams)
+ LOG.info("[BRM-DRIVER] _generate_vrm_cmd bodyparams is [%s]",
+ bodyparams)
+ body = json.dumps(bodyparams)
+ LOG.info("[BRM-DRIVER] _generate_vrm_cmd body is [%s]", body)
+ else:
+ raise cinder_exception.UnknownCmd(cmd=method)
+
+ url = self._generate_url(path, query)
+ LOG.info("[BRM-DRIVER] _generate_vrm_cmd url is [%s]", url)
+
+ return (method, url, body, hastask)
diff --git a/cinder/volume/drivers/huawei/fusioncompute/vrm_driver.py b/cinder/volume/drivers/huawei/fusioncompute/vrm_driver.py
new file mode 100644
index 0000000..7e8daab
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/vrm_driver.py
@@ -0,0 +1,2355 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import math
+import os
+import random
+
+from cinder import context as cinder_context
+from cinder import exception
+from cinder.i18n import _
+from cinder import volume
+from cinder.volume import driver
+from cinder.volume.drivers.huawei.vrm.cluster_proxy import ClusterProxy
+from cinder.volume.drivers.huawei.vrm.conf import FC_DRIVER_CONF
+from cinder.volume.drivers.huawei.vrm.datastore_proxy import DatastoreProxy
+from cinder.volume.drivers.huawei.vrm import exception as driver_exception
+from cinder.volume.drivers.huawei.vrm.host_proxy import HostProxy
+from cinder.volume.drivers.huawei.vrm.http_client import VRMHTTPClient
+from cinder.volume.drivers.huawei.vrm.vm_proxy import VmProxy
+from cinder.volume.drivers.huawei.vrm.volume_proxy import VolumeProxy
+from cinder.volume.drivers.huawei.vrm.volume_snapshot_proxy import \
+ VolumeSnapshotProxy
+from cinder.volume import utils as volume_utils
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
# Option registered for the backend section: which volume driver class
# cinder-volume loads for this backend (defaults to this VRM driver).
backend_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.huawei.vrm.vrm_driver.VRMDriver',
               help='Driver to use for volume creation'),
]
+
+
def metadata_to_dict(metadata):
    """Collapse a list of metadata item dicts into a plain mapping.

    Each item is expected to carry 'key' and 'value' entries; items
    whose 'deleted' entry is truthy are skipped (a missing 'deleted'
    counts as not deleted).

    :param metadata: iterable of metadata item dicts
    :return: dict mapping item['key'] -> item['value']
    """
    return {item['key']: item['value']
            for item in metadata
            if not item.get('deleted')}
+
+
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Backend-specific options appended to the driver's configuration
# object in VRMDriver.__init__ (see append_config_values there).
fc_plugin_conf = [
    cfg.StrOpt('vrm_config_file',
               default=None,
               help='vrm_config_file'),
    # toggles whether thin-provisioning capability is reported
    cfg.BoolOpt('vrm_thin_provision',
                default=False,
                help='Switch of thin provisioning support'),
    # max_over_subscription_ratio reported per pool
    cfg.FloatOpt('vrm_over_ratio',
                 default=1.0,
                 help='Ratio of thin provisioning'),
    cfg.IntOpt('vrm_reserved_percentage',
               default=0,
               help='Reserved percentage of the backend volumes'),
    # datastore names this backend may place volumes on (pool list)
    cfg.ListOpt('vrm_ds_name',
                default=[],
                help='vrm_ds_name'),
    # pools that report the configured affine_rate instead of 1
    cfg.ListOpt('current_list',
                default=[],
                help='current_list'),
    cfg.IntOpt('affine_rate',
               default=1,
               help='affine_rate'),
]
+
+
+class VRMDriver(driver.VolumeDriver):
+ VENDOR = 'Huawei'
+ BACKEND = 'VRM'
+ VERSION = 'v1.1'
+
+ def __init__(self, *args, **kwargs):
+ '''__init__
+
+ __init__
+
+ :param args:
+ :param kwargs:
+ :return:
+ '''
+ super(VRMDriver, self).__init__(*args, **kwargs)
+ LOG.info(_("[VRM-CINDER] start VRMDriver __init__()"))
+
+ self.context = None
+ self.volume_api = volume.API()
+ self.SHARED_HOSTS = []
+ self.SHARED_DATASTORES = []
+ self.SHARED_VOLUMES = []
+
+ self.LAST_SHARED_HOSTS = self.SHARED_HOSTS
+ self.LAST_SHARED_DATASTORES = self.SHARED_DATASTORES
+ self.LAST_SHARED_VOLUMES = self.SHARED_VOLUMES
+ self.left_periodrate = CONF.vrm_sm_periodrate
+
+ if self.configuration:
+ LOG.info(_("[VRM-CINDER] append configuration"))
+ self.configuration.append_config_values(fc_plugin_conf)
+ else:
+ LOG.info(_("[VRM-CINDER] no configuration exception"))
+ raise driver_exception.NoNeededData
+
+ over_ratio = self.configuration.get('vrm_over_ratio')
+ if over_ratio is None:
+ self.over_ratio = 1.0
+ else:
+ LOG.info(_("[VRM-CINDER] super_ratio [%s]"), over_ratio)
+ self.over_ratio = over_ratio
+
+ thin_provision = self.configuration.vrm_thin_provision
+ if thin_provision is not None:
+ self.thin_provision = False
+ else:
+ LOG.info(_("[VRM-CINDER] thin_provision [%s]"), thin_provision)
+ self.thin_provision = bool(thin_provision)
+
+ reserved_percentage = self.configuration.get('vrm_reserved_percentage')
+ if reserved_percentage is None:
+ self.reserved_percentage = 0
+ else:
+ LOG.info(_("[VRM-CINDER] reserved_percentage [%s]"),
+ reserved_percentage)
+ self.reserved_percentage = int(reserved_percentage)
+
+ self.pool_list = self.configuration.get('vrm_ds_name')
+ if not self.pool_list:
+ LOG.error(_("[VRM-CINDER] vrm_ds_name is None exception"))
+ raise driver_exception.NoNeededData
+ self.pool_list = list(set(self.pool_list))
+
+ self.current_list = self.configuration.get('current_list')
+ self.affine_rate = self.configuration.get('affine_rate')
+
+ self.volume_proxy = VolumeProxy()
+ self.vm_proxy = VmProxy()
+ self.volume_snapshot_proxy = VolumeSnapshotProxy()
+ self.host_proxy = HostProxy()
+ self.cluster_proxy = ClusterProxy()
+ self.datastore_proxy = DatastoreProxy()
+ self.vrmhttpclient = VRMHTTPClient()
+
+ self.site_urn = None
+ self.site_uri = None
+ self.cluster_urn = None
+
+ self.shared_hosts = []
+ self.shared_datastores = []
+ self.shared_volumes = []
+ self.auth_token = None
+ self.log_count = 0
+
+ LOG.info(_("[VRM-CINDER] end __init__()"))
+
    def _get_host_datastore_vol(self):
        '''Fetch hosts and shared datastores from the FC backend.

        Lists all hosts, then filters the backend's datastores:
        a datastore is kept when its name appears in the configured
        pool list, and additionally when its storage type is one of
        FC_DRIVER_CONF.vrm_ds_types and it is visible to the required
        hosts (all hosts when vrm_ds_hosts_share is set, at least one
        host otherwise).

        NOTE(review): a datastore matching both the pool list and the
        share-type checks is appended twice to shared_datastores --
        presumably unintended, TODO confirm against callers before
        changing.

        :return: (shared_hosts, shared_datastores, shared_volumes)
        :raises: driver_exception.NotFound when no datastore qualifies
        '''
        self.shared_hosts = []
        self.shared_datastores = []
        self.shared_volumes = []

        self.shared_hosts = self.host_proxy.list_host()
        hosturns = sorted([host['urn'] for host in self.shared_hosts])
        hosturns_set = set(hosturns)

        datastores = self.datastore_proxy.list_datastore()
        # storage types that may be shared, compared case-insensitively
        sharetypes = [t.lower() for t in FC_DRIVER_CONF.vrm_ds_types]
        for datastore in datastores:
            storage_type = datastore['storageType'].lower()
            ds_name = datastore['name']
            if self.pool_list is not None:
                if ds_name in self.pool_list:
                    # configured pool name -> always kept
                    self.shared_datastores.append(datastore)
                else:
                    # not in the configured pool list -> skip entirely
                    continue

            if storage_type in sharetypes:
                hosts = sorted(datastore['hosts'])
                hosts_set = set(hosts)

                if FC_DRIVER_CONF.vrm_ds_hosts_share:
                    # require the datastore to be visible on ALL hosts
                    if len(hosturns_set - hosts_set) == 0:
                        self.shared_datastores.append(datastore)
                else:
                    # require visibility on at least one host
                    if len(hosturns_set & hosts_set) > 0:
                        LOG.info(_("[VRM-CINDER] append ds"))
                        self.shared_datastores.append(datastore)

        if len(self.shared_datastores) <= 0:
            LOG.info(_("[VRM-CINDER] can not found any shared datastores "))
            raise driver_exception.NotFound()

        return (
            self.shared_hosts, self.shared_datastores, self.shared_volumes)
+
+ def _refresh_storage_info(self, refresh=False):
+ '''_refresh_storage_info
+
+ _refresh_storage_info
+
+ :param refresh:
+ :return:
+ '''
+# LOG.info(_("[BRM-DRIVER] start _refresh_storage_info(%s) "), refresh)
+ if refresh is True:
+ self.LAST_SHARED_HOSTS = self.SHARED_HOSTS
+ self.LAST_SHARED_DATASTORES = self.SHARED_DATASTORES
+ self.LAST_SHARED_VOLUMES = self.SHARED_VOLUMES
+
+ self.SHARED_HOSTS, self.SHARED_DATASTORES, self.SHARED_VOLUMES = \
+ self._get_host_datastore_vol()
+ self.left_periodrate = CONF.vrm_sm_periodrate
+
+ self.log_count = self.log_count + 1
+ if self.log_count >= 20:
+ LOG.info(_("[CINDER-BRM] refreshed shared hosts :[ %s ]"),
+ self.SHARED_HOSTS)
+ LOG.info(_("[CINDER-BRM] refreshed shared datastores :[ %s ]"),
+ self.SHARED_DATASTORES)
+ self.log_count = 0
+# LOG.info(_("[BRM-DRIVER] end _refresh_storage_info(%s) "), refresh)
+
    def do_setup(self, context):
        '''One-time driver initialization called by the volume manager.

        Clears any http(s)_proxy environment variables (so REST calls
        to the FC node are made directly), initializes the HTTP client,
        resolves the site urn, loads the host/datastore inventory and
        remembers the urn of the first cluster.

        :param context: cinder request context, kept on self
        :return: None
        '''
        LOG.info(_("[BRM-DRIVER] start do_setup() "))
        LOG.info(_("[VRM-CINDER] del environ http_proxy https_proxy"))
        # proxies would intercept the driver's REST traffic; drop them
        if os.getenv('http_proxy'):
            LOG.info(_("[VRM-CINDER] del environ http_proxy"))
            del os.environ['http_proxy']
        if os.getenv('https_proxy'):
            LOG.info(_("[VRM-CINDER] del environ https_proxy"))
            del os.environ['https_proxy']
        self.context = context
        self.vrmhttpclient.init()
        self.site_urn = self.vrmhttpclient.get_siteurn()
        self._refresh_storage_info(True)
        clusters = self.cluster_proxy.list_cluster()
        # NOTE(review): only the first cluster returned is used
        self.cluster_urn = clusters[0].get('urn')
        LOG.info(_("[CINDER-BRM] end do_setup"))
+
+ def check_for_setup_error(self):
+ '''check_for_setup_error
+
+ check_for_setup_error
+
+ :return:
+ '''
+ # LOG.info(_("[BRM-DRIVER] start check_for_setup_error() "))
+ if len(self.SHARED_HOSTS) == 0 or len(self.SHARED_DATASTORES) == 0:
+ LOG.info(_(
+ "[CINDER-BRM] check_for_setup_error, "
+ "shared datasotre not found"))
+ raise driver_exception.NoNeededData
+
+ def _build_volume_stats(self):
+ '''_build_volume_stats
+
+ '''
+ # LOG.info(_("[BRM-DRIVER] start _build_volume_stats() "))
+ stats = {}
+ stats["pools"] = []
+ stats['driver_version'] = self.VERSION
+ stats['storage_protocol'] = 'VRM'
+ stats['vendor_name'] = self.VENDOR
+
+ backend = self.configuration.get('volume_backend_name')
+ if backend is None:
+ stats['volume_backend_name'] = self.BACKEND
+ else:
+ stats['volume_backend_name'] = self.configuration.get(
+ 'volume_backend_name')
+
+ return stats
+
    def _try_get_volume_stats(self, refresh=False):
        '''Build the full per-pool stats report.

        When ``refresh`` is True the inventory is re-fetched once the
        period counter runs out.  For each configured pool that is
        present in the shared-datastore cache, a pool entry is built
        with capacity, thin-provisioning and (for advanceSan with a
        version) consistency-group capability; pools whose datastore is
        not in 'NORMAL' state report zero capacity.

        :param refresh: if True, count down and possibly refresh first
        :return: the stats dict
        '''
        if refresh:
            # only hit the backend every vrm_sm_periodrate calls
            self.left_periodrate -= 1
            if self.left_periodrate <= 0:
                self._refresh_storage_info(refresh)

        stats = self._build_volume_stats()
        ds_meta = {}
        ds_names = [ds['name'] for ds in self.SHARED_DATASTORES]
        for pool in self.pool_list:
            # skip configured pools that the backend does not expose
            if pool not in ds_names:
                continue
            new_pool = {}
            ds_meta['ds_name'] = pool
            datastore = self._choose_datastore(ds_meta)
            # consistency groups need an advanceSan datastore with a version
            if datastore.get('storageType') == 'advanceSan' and datastore.get(
                    'version') is not None:
                new_pool.update(dict(consistencygroup_support=True))
            if 'NORMAL' != datastore['status']:
                # unhealthy datastore: advertise the pool with no capacity
                new_pool.update(dict(
                    pool_name=pool,
                    free_capacity_gb=0,
                    reserved_percentage=self.reserved_percentage,
                    total_capacity_gb=0,
                    provisioned_capacity_gb=0,
                    max_over_subscription_ratio=self.over_ratio,
                    affine_rate=1
                ))
                stats["pools"].append(new_pool)
                continue

            # pools in current_list report the configured affine_rate
            if self.current_list is not None and pool in self.current_list:
                new_pool.update(dict(
                    pool_name=pool,
                    free_capacity_gb=datastore['freeSizeGB'],
                    reserved_percentage=self.reserved_percentage,
                    total_capacity_gb=datastore['capacityGB'],
                    provisioned_capacity_gb=datastore['usedSizeGB'],
                    max_over_subscription_ratio=self.over_ratio,
                    affine_rate=self.affine_rate
                ))
            else:
                new_pool.update(dict(
                    pool_name=pool,
                    free_capacity_gb=datastore['freeSizeGB'],
                    reserved_percentage=self.reserved_percentage,
                    total_capacity_gb=datastore['capacityGB'],
                    provisioned_capacity_gb=datastore['usedSizeGB'],
                    max_over_subscription_ratio=self.over_ratio,
                    affine_rate=1
                ))
            if self.thin_provision is True:
                new_pool.update(dict(
                    thin_provisioning_support=True,
                    thick_provisioning_support=False
                ))
            else:
                new_pool.update(dict(
                    thin_provisioning_support=False,
                    thick_provisioning_support=True
                ))
            # tierSize[0..2] = ssd/sas/nl_sas capacity; report the
            # non-empty tiers as a ';'-joined 'type' string
            tier_size = datastore.get('tierSize', None)
            type_v3 = []
            if tier_size and len(tier_size) >= 3:
                if tier_size[0] > 0:
                    type_v3.append('ssd')
                if tier_size[1] > 0:
                    type_v3.append('sas')
                if tier_size[2] > 0:
                    type_v3.append('nl_sas')
                if len(type_v3) > 0:
                    type_v3_str = ';'.join(type_v3)
                    LOG.info(_("[CINDER-BRM] type of v3 is %s"), type_v3_str)
                    new_pool.update(dict(type=type_v3_str))
            stats["pools"].append(new_pool)

        return stats
+
+ def get_volume_stats(self, refresh=False):
+ '''get_volume_stats
+
+ get_volume_stats
+
+ :param refresh:If 'refresh' is True, run the update first.
+ :return:Return the current state of the volume service.
+ '''
+# LOG.info(_("[BRM-DRIVER] start get_volume_stats() "))
+ try:
+ stats = self._try_get_volume_stats(refresh)
+ except Exception as ex:
+ LOG.info(_("[CINDER-BRM] get volume stats Exception (%s)"), ex)
+ stats = self._build_volume_stats()
+ return stats
+
+ def check_and_modify_thin(self, ds_urn, thin):
+ '''check_and_modify_thin
+
+ [ DSWARE] /[LOCAL, SAN, LUN]
+ :param ds_urn:
+ :param thin:
+ :return:
+ '''
+ LOG.info(_("[CINDER-BRM] start check_and_modify_thin (%s)"), ds_urn)
+ for datastore in self.SHARED_DATASTORES:
+ # LOG.info(_("[CINDER-BRM] ds_urn (%s)"), ds_urn)
+ LOG.info(_("[CINDER-BRM] datastore (%s)"), datastore['urn'])
+ if datastore['urn'] == ds_urn:
+ ds_type = str(datastore['storageType']).upper()
+ LOG.info(_("[CINDER-BRM] ds_type (%s)"), ds_type)
+ if ds_type in ['LOCAL', 'SAN', 'LUN']:
+ LOG.info(_("[CINDER-BRM] return False (%s)"), ds_urn)
+ return False
+ if ds_type in ['DSWARE']:
+ LOG.info(_("[CINDER-BRM] return True (%s)"), ds_urn)
+ return True
+ return thin
+
+ def check_thin(self, datastore, thin):
+ '''check_thin
+
+ [ DSWARE] /[LOCAL, SAN, LUN]
+ :param ds_urn:
+ :param thin:
+ :return:
+ '''
+ # LOG.info(_("[CINDER-BRM] start check_thin (%s)"), datastore)
+ ds_type = str(datastore['storageType']).upper()
+ # LOG.info(_("[CINDER-BRM] ds_type (%s)"), ds_type)
+ if ds_type in ['LOCAL', 'SAN', 'LUN']:
+ # LOG.info(_("[CINDER-BRM] return False (%s)"), datastore)
+ return False
+ if ds_type in ['DSWARE']:
+ # LOG.info(_("[CINDER-BRM] return True (%s)"), datastore)
+ return True
+ return thin
+
+ def _check_and_choice_datastore(self, ds_meta):
+ '''_check_and_choice_datastore
+
+ _check_and_choice_datastore
+
+ :param ds_meta:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start _check_and_choice_datastore() "))
+ datastoreUrn = ds_meta['datastoreUrn']
+ hypervisorIp = ds_meta['hypervisorIp']
+ quantityGB = ds_meta['quantityGB']
+ storageType = ds_meta['storageType']
+ isThin = ds_meta['isThin']
+ hypervisorUrn = None
+ if hypervisorIp:
+ for host in self.SHARED_HOSTS:
+ if host['ip'].strip() == hypervisorIp.strip():
+ hypervisorUrn = host['urn']
+ break
+ if hypervisorUrn is None:
+ LOG.info(_("[CINDER-BRM] can not found hypervisorip=%s"),
+ hypervisorIp)
+ raise exception.HostNotFound(host=hypervisorIp)
+
+ if datastoreUrn:
+ for datastore in self.SHARED_DATASTORES:
+ if datastore['urn'] == datastoreUrn:
+ this_storageType = datastore['storageType']
+ this_isThin = datastore['isThin']
+ this_freeSizeGB = int(datastore['freeSizeGB'])
+ this_hosts = datastore['hosts']
+
+ if this_storageType.lower() == storageType.lower() and str(
+ this_isThin).lower() == str(
+ isThin).lower() and this_freeSizeGB > quantityGB:
+ if hypervisorUrn is None:
+ return datastore['urn']
+ elif hypervisorUrn in this_hosts:
+ return datastore['urn']
+ raise driver_exception.NotFound()
+ raise driver_exception.NotFound()
+ ds_hosts = None
+ ds_urn = None
+ random.shuffle(self.SHARED_DATASTORES)
+ for datastore in self.SHARED_DATASTORES:
+
+ this_isThin = datastore['isThin']
+ this_freeSizeGB = int(datastore['freeSizeGB'])
+ ds_hosts = datastore['hosts']
+ if this_freeSizeGB < quantityGB:
+ continue
+ if isThin is None:
+ ds_urn = datastore['urn']
+ break
+
+ elif str(this_isThin).lower() == str(isThin).lower():
+ ds_urn = datastore['urn']
+ break
+
+ if ds_urn is None:
+ raise driver_exception.NotFound()
+ random.shuffle(ds_hosts)
+ host_urn = ds_hosts[0]
+ return ds_urn, host_urn
+
+ def _choose_datastore(self, ds_meta):
+ '''_choose_datastore
+
+ _check_and_choice_datastore
+
+ :param ds_meta:
+ :return:
+ '''
+ for datastore in self.SHARED_DATASTORES:
+ if ds_meta['ds_name'] == datastore['name']:
+ return datastore
+
+ raise driver_exception.NotFound()
+
+ def _vrm_pack_provider_location(self, volume_body):
+ '''_vrm_pack_provider_location
+
+ _vrm_pack_provider_location
+
+ :param volume:
+ :param volume_body:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start _vrm_pack_provider_location() "))
+ fc_ip = FC_DRIVER_CONF.fc_ip
+
+ provider_location = ""
+ provider_location += ('addr=' + fc_ip + ':' + str(CONF.vrm_port) + ',')
+ provider_location += ('uri=' + volume_body.get('uri') + ',')
+ provider_location += ('urn=' + volume_body.get('urn') + ',')
+ provider_location += \
+ ('datastoreUrn=' + volume_body.get('datastoreUrn') + ',')
+ provider_location += ('isThin=' + str(volume_body.get('isThin')) + ',')
+ provider_location += \
+ ('storageType=' + volume_body.get('storageType') + ',')
+ provider_location += ('type=' + volume_body.get('type'))
+
+ return provider_location
+
+ def _vrm_unpack_provider_location(self, provider_location, key=None):
+ '''_vrm_unpack_provider_location
+
+ :param provider_location:
+ :param key:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start _vrm_unpack_provider_location() "))
+ kvalue = None
+ kvs = {}
+ if not isinstance(provider_location, None) and len(
+ provider_location) > 0:
+ items = provider_location.split(',')
+ for item in items:
+ (ki, eqi, vi) = item.partition('=')
+ kvs[ki] = vi
+ if key and key == ki:
+ kvalue = vi
+
+ return kvalue, kvs
+
    def _vrm_get_volume_meta(self, id):
        '''Query a backend volume and build its model update + metadata.

        Packs the volume body into provider_location, collects
        replication data (lun id / storage SN) when present, and
        mirrors urn/uri/storage type/quick-start info into the
        metadata dict for the cinder DB.

        :param id: FC-side volume id; when falsy, empty results return
        :return: tuple (model_update dict, metadata dict)
        '''
        LOG.info(_("[BRM-DRIVER] start _vrm_get_volume_meta() "))
        model_update = {}
        metadata = {}

        if id:
            volume_body = self.volume_proxy.query_volume(id=id)
            if volume_body is not None:
                model_update[
                    'provider_location'] = self._vrm_pack_provider_location(
                    volume_body)
                sd_sn = volume_body.get('storageSN', None)
                lun_id = volume_body.get('lunId', None)
                replication_driver_data = {}
                if lun_id:
                    LOG.info(_("[BRM-DRIVER] lun id is %s"), lun_id)
                    replication_driver_data.update({'lun_id': lun_id})
                if sd_sn:
                    LOG.info(_("[BRM-DRIVER] sn is %s"), sd_sn)
                    replication_driver_data.update({'sn': sd_sn})
                if len(replication_driver_data) > 0:
                    LOG.info(_("[BRM-DRIVER] replication_driver_data is %s"),
                             sd_sn)
                    model_update['replication_driver_data'] = json.dumps(
                        replication_driver_data)
                urn = volume_body['urn']
                uri = volume_body['uri']
                storage_type = volume_body.get('storageType')
                metadata.update({'urn': urn})
                metadata.update({'uri': uri})
                if storage_type is not None:
                    LOG.info(
                        _("[BRM-DRIVER] the storage type of volume is %s"),
                        storage_type)
                    # prefix marks the value as FusionCompute-sourced
                    storage_type = 'FC_' + storage_type
                    metadata.update({'StorageType': storage_type})
                # volInfoUrl present means the volume supports quick start
                volInfoUrl = volume_body.get('volInfoUrl', None)
                if volInfoUrl:
                    metadata.update({'quantityGB': volume_body['quantityGB']})
                    metadata.update({'volInfoUrl': volInfoUrl})
                pvscsi_support = volume_body.get('pvscsiSupport')
                if pvscsi_support == 1:
                    metadata.update({'hw:passthrough': 'true'})

        return model_update, metadata
+
+ def create_consistencygroup(self, context, group):
+ LOG.info(_("[BRM-DRIVER] create consistencygroup. "))
+ return
+
+ def update_consistencygroup(self, context, group,
+ add_volumes=None, remove_volumes=None):
+ """Updates a consistency group."""
+ return None, None, None
+
    def _vrm_delete_volume(self, volume):
        '''Delete a volume on the FC backend.

        Resolves the FC volume urn/uri either from provider_location
        or, when that is empty, from the volume metadata.  Refuses the
        delete while replications exist, then detaches and deletes via
        _vrm_delete_volume_vm.

        :param volume: cinder volume to delete
        :return: None (also when no uri can be resolved)
        '''
        LOG.info(_("[BRM-DRIVER] start _vrm_delete_volume() "))
        provider_location = volume['provider_location']
        if provider_location is None or provider_location == '':
            LOG.error(_("[BRM-DRIVER]provider_location is null "))
            # fallback: resolve urn/uri from the volume metadata instead
            vol_meta = volume.get('volume_metadata')
            vol_meta_dict = metadata_to_dict(vol_meta)
            vol_uri = vol_meta_dict.get('uri')
            if vol_uri is not None:
                fc_vol_urn = vol_meta_dict.get('urn')
                # FC volume id is the last ':'-separated segment of the urn
                fc_vol_id = fc_vol_urn[fc_vol_urn.rfind(':') + 1:]
                self._check_replications(fc_vol_id)
                self._vrm_delete_volume_vm(volume_urn=fc_vol_urn,
                                           volume_uri=vol_uri)
            # NOTE(review): silently returns when neither
            # provider_location nor metadata carry a uri
            return
        fc_volume_urn, items = \
            self._vrm_unpack_provider_location(volume['provider_location'],
                                               'urn')
        fc_vol_id = fc_volume_urn[fc_volume_urn.rfind(':') + 1:]
        self._check_replications(fc_vol_id)
        volume_uri, items = \
            self._vrm_unpack_provider_location(volume['provider_location'],
                                               'uri')
        self._vrm_delete_volume_vm(volume_urn=fc_volume_urn,
                                   volume_uri=volume_uri)
+
    def _vrm_delete_volume_vm(self, volume_urn, volume_uri):
        '''Detach a volume from its VM (if any), then delete it.

        When the volume is attached, the VM is force-stopped first
        (unless already stopped), the volume detached, and only then
        deleted on the backend.

        :param volume_urn: FC urn of the volume
        :param volume_uri: FC uri of the volume
        :return: None
        '''
        LOG.info(_("[BRM-DRIVER] start _vrm_delete_volume_vm() "))
        kwargs = {}
        kwargs['volume_urn'] = volume_urn
        vm_body = self.vm_proxy.query_vm_volume(**kwargs)
        if vm_body is None:
            # not attached anywhere -- delete directly
            LOG.info(_("[BRM-DRIVER]vm attached is zero "))
            self.volume_proxy.delete_volume(volume_uri=volume_uri)
        else:
            vm_uri = vm_body.get('uri')
            vm_status = vm_body.get('status')
            # NOTE(review): vm id is assumed to be the last 10 chars of
            # the uri -- TODO confirm against the VRM uri format
            kwargs['vm_id'] = vm_uri[-10:]
            kwargs['mode'] = 'force'
            if vm_status not in ['stopped']:
                self.vm_proxy.stop_vm(**kwargs)
            self.vm_proxy.detach_vol_from_vm(**kwargs)
            self.volume_proxy.delete_volume(volume_uri=volume_uri)
+
+ def _check_quick_start(self, volume_id):
+ '''_check_quick_start
+
+ :param volume_id:
+ :return:
+ '''
+ admin_context = cinder_context.get_admin_context()
+ admin_metadata = self.db.volume_admin_metadata_get(admin_context,
+ volume_id)
+ quick_start = admin_metadata.get('quick_start')
+ if quick_start is not None and str(quick_start).lower() == 'true':
+ return True
+ else:
+ return False
+
+ def _check_replications(self, fc_vol_id):
+
+ if fc_vol_id:
+ try:
+ body = self.volume_proxy.query_volume_replications(
+ volume_id=fc_vol_id)
+ LOG.info(
+ _("[BRM-DRIVER] check replications : replications is %s"),
+ body.get('replications'))
+ except Exception as ex:
+ LOG.error(_("[BRM-DRIVER] get volume replications expection"))
+ LOG.exception(ex)
+ return
+ if body.get('replications'):
+ LOG.error(_(
+ "[BRM-DRIVER] The volume %s to deleted has replications, "
+ "could not be deleted ,extended or migrated."), fc_vol_id)
+ raise driver_exception.FusionComputeDriverException()
+
+ def delete_consistencygroup(self, context, group):
+
+ LOG.info(_("[BRM-DRIVER] enter delete_consistencygroup() "))
+ model_update = {}
+ model_update['status'] = 'deleted'
+ volumes = self.db.volume_get_all_by_group(context, group['id'])
+ if volumes:
+ for volume_ref in volumes:
+ try:
+ self.delete_volume(volume_ref)
+ volume_ref['status'] = 'deleted'
+ except Exception as ex:
+ LOG.error(
+ _("[BRM-DRIVER] delete_consistencygroup failed. "))
+ LOG.exception(ex)
+ volume_ref['status'] = 'error_deleting'
+ model_update['status'] = 'error_deleting'
+
+ LOG.info(_("[BRM-DRIVER] end delete_consistencygroup() "))
+
+ return model_update, volumes
+
    def create_volume(self, volume):
        '''Create a volume on the FC backend.

        Linked-clone / quick-start volumes are skipped entirely (they
        are materialized elsewhere).  Otherwise the datastore is picked
        from the pool encoded in volume['host'], an FC volume is
        created, and its urn/uri/etc. are written into the cinder
        volume metadata.

        Backend create body (for reference):
        {
            "name": string,
            quantityGB: integer,
            datastoreUrn: string,
            "isThin": boolean,
            "type": string,
            indepDisk: boolean,
            persistentDisk: boolean
        }

        :param volume: cinder volume to create
        :return: model_update dict (None for linked-clone volumes)
        :raises exception.VolumeDriverException: no datastore found
        '''
        LOG.info(_("[BRM-DRIVER] start create_volume() "))

        vol_meta = volume.get('volume_metadata')
        vol_meta_dict = metadata_to_dict(vol_meta)
        # normalize the 'linked_clone' metadata string to a bool
        linked_clone = vol_meta_dict.get('linked_clone')
        if linked_clone is None:
            linked_clone = False
        elif str(linked_clone).lower() == 'true':
            linked_clone = True
        else:
            linked_clone = False

        # check quick start
        if not linked_clone and self._check_quick_start(volume.get('id')):
            linked_clone = True

        if linked_clone:
            # linked-clone volumes are created lazily by the clone path
            LOG.info(_("[BRM-DRIVER] linked_clone volume. do nothing. "))
            return

        args_dict = {}

        args_dict['name'] = volume['name']
        args_dict['size'] = int(volume['size'])
        args_dict['uuid'] = volume['id']
        shareable = volume.get('shareable')
        if shareable and True == shareable:
            LOG.info(_("[CINDER-VRM] shareable"))
            args_dict['type'] = 'share'

        else:
            args_dict['type'] = 'normal'

        is_thin = FC_DRIVER_CONF.vrm_is_thin

        # pool name is encoded as the second '#'-separated field of host
        ds_meta = {}
        try:
            ds_meta['ds_name'] = volume.get('host').split('#')[1]
        except exception.CinderException as ex:
            LOG.info(
                _(
                    "[CINDER-BRM] host format exception, host is %s ") %
                volume.get('host'))
            raise ex

        datastore = self._choose_datastore(ds_meta)
        if datastore:
            LOG.info(_("[CINDER-VRM] datastore [%s],"), datastore)
            if str(datastore.get('storageType')).upper() in ['LUN']:
                # raw LUN: the volume spans the whole datastore
                LOG.info(_("[CINDER-VRM] rdm disk [%s]"), volume['id'])
                args_dict['size'] = int(datastore.get('capacityGB'))
                args_dict['independent'] = True

            args_dict['ds_urn'] = datastore.get('urn')
            is_thin = self.check_thin(datastore, is_thin)
            args_dict['is_thin'] = is_thin

            context = cinder_context.get_admin_context()

            # hw:passthrough may come from the volume type extra specs
            # or from the volume metadata; either enables pvscsi
            volume_type_id = volume.get('volume_type_id')
            if volume_type_id:
                LOG.info(_("[BRM-DRIVER] query volume type id is %s"),
                         volume_type_id)
                volume_type_extra_specs = self.db.volume_type_extra_specs_get(
                    context, volume_type_id)
                LOG.info(_("[BRM-DRIVER] volume_type_extra_specs is %s"),
                         str(volume_type_extra_specs))
                if volume_type_extra_specs and str(volume_type_extra_specs.get(
                        'hw:passthrough')).lower() == 'true':
                    args_dict[
                        'support_pvscsi'] = 1
            support_pvscsi = vol_meta_dict.get('hw:passthrough')
            if support_pvscsi and str(support_pvscsi).lower() == 'true':
                args_dict[
                    'support_pvscsi'] = 1

            body = self.volume_proxy.create_volume(**args_dict)

            # FC volume id is the last ':'-separated segment of the urn
            temp_str = body.get('urn')
            fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
            LOG.info(_("[CINDER-VRM] fc_vol_id [%s] ") % fc_vol_id)
            model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
            self.db.volume_metadata_update(context, volume['id'], metadata,
                                           False)

            self.volume_proxy.wait_task(**body)

            # re-read after the task completes; keep the pre-task model
            # update when the re-read fails
            try:
                model_update, metadata = self._vrm_get_volume_meta(
                    id=fc_vol_id)
            except Exception as ex:
                LOG.error(
                    _(
                        "[CINDER-VRM] get volume information failed after "
                        "create volume.volume id is [%s] ") % fc_vol_id)
                LOG.exception(ex)
            return model_update
        else:
            raise exception.VolumeDriverException
+
+ def _register_volume(self, volume):
+ '''_register_volume
+
+ _register_volume
+
+ :param volume:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start _register_volume() "))
+ volume_metadata = volume['volume_metadata']
+ volume_metadata_dict = metadata_to_dict(volume_metadata)
+ model_update = {}
+
+ volume_urn = volume_metadata_dict.get('volumeUrn')
+ volume_id = volume_urn.split(':')[-1]
+ volume_body = self.volume_proxy.get_volume(vol_id=volume_id)
+ model_update['provider_location'] = self._vrm_pack_provider_location(
+ volume_body)
+ model_update['volume_urn'] = volume_body['urn']
+ return model_update
+
+ def delete_volume(self, volume):
+ '''delete_volume
+
+ delete_volume
+
+ :param volume:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start delete_volume() "))
+ self._vrm_delete_volume(volume)
+
    def create_export(self, context, volume):
        '''Export a volume (no-op: VRM volumes need no export step).

        :param context: cinder request context
        :param volume: volume to export
        :return: None
        '''
        pass
+
    def remove_export(self, context, volume):
        '''Remove a volume export (no-op for this backend).

        :param context: cinder request context
        :param volume: volume whose export would be removed
        :return: None
        '''
        pass
+
    def ensure_export(self, context, volume):
        '''Re-export a volume after restart (no-op for this backend).

        :param context: cinder request context
        :param volume: volume whose export would be ensured
        :return: None
        '''
        pass
+
    def check_for_export(self, context, volume_id):
        '''Check that a volume is exported (no-op for this backend).

        :param context: cinder request context
        :param volume_id: id of the volume to check
        :return: None
        '''
        pass
+
+ def create_snapshot(self, snapshot):
+ '''create_snapshot
+
+ create_snapshot
+
+ :param snapshot:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start create_snapshot() "))
+ model_update = {}
+ volume = self.db.volume_get(self.context, snapshot['volume_id'])
+ vol_meta = volume['volume_metadata']
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ vol_urn = vol_meta_dict.get('urn')
+ if vol_urn is None:
+ LOG.error(_("vol_urn is null."))
+
+ def volume_uri_to_number(uri):
+ hi, si, ti = uri.rpartition('/')
+ return ti
+
+ snapshot_id = snapshot['id']
+ snapshot_uuid = str(snapshot_id).replace('-', '')
+ body = self.volume_snapshot_proxy.create_volumesnapshot(
+ snapshot_uuid=snapshot_uuid, vol_urn=vol_urn)
+
+ if body['urn'] is None:
+ LOG.error(_("Trying to create snapshot failed, volume id is: %s"),
+ snapshot['volume_id'])
+ raise driver_exception.FusionComputeDriverException()
+
+ return model_update
+
+ def delete_snapshot(self, snapshot):
+ '''delete_snapshot
+
+ delete_snapshot
+
+ :param snapshot:
+ :return:
+ '''
+ LOG.info(_("[BRM-DRIVER] start delete_snapshot() "))
+ model_update = {}
+ snapshot_id = snapshot['id']
+ snapshot_uuid = str(snapshot_id).replace('-', '')
+ body = self.volume_snapshot_proxy.query_volumesnapshot(
+ uuid=snapshot_uuid)
+ if body is None:
+ return model_update
+ self.volume_snapshot_proxy.delete_volumesnapshot(id=snapshot_uuid)
+
+ return model_update
+
    def create_volume_from_snapshot(self, volume, snapshot):
        '''Create a volume from an existing snapshot on the FC backend.

        Linked clones are rejected.  The 'full_clone' metadata entry
        ('0' or '1', default '1') is forwarded to the backend, the
        volume is created from the snapshot and its urn/uri metadata
        is written back to the cinder DB.

        :param volume: cinder volume to create
        :param snapshot: source snapshot
        :return: model update dict
        :raises exception.CinderException: for linked-clone volumes
        '''
        LOG.info(_("[BRM-DRIVER] start create_volume_from_snapshot()"))
        args_dict = {}

        vol_meta = volume.get('volume_metadata')
        vol_meta_dict = metadata_to_dict(vol_meta)
        # normalize the 'linked_clone' metadata string to a bool
        linked_clone = vol_meta_dict.get('linked_clone')
        if linked_clone is None:
            linked_clone = False
        elif str(linked_clone).lower() == 'true':
            linked_clone = True
        else:
            linked_clone = False
        if linked_clone is True:
            LOG.warn(_("[BRM-DRIVER] linked_clone volume not support!!"))
            raise exception.CinderException

        # normalize 'full_clone' to the strings '0'/'1' (default '1')
        full_clone = vol_meta_dict.get('full_clone')
        if full_clone is None:
            full_clone = '1'
        elif str(full_clone) == '0':
            full_clone = '0'
        else:
            full_clone = '1'
        args_dict['full_clone'] = full_clone

        model_update = {}
        snapshot_id = snapshot['id']
        os_vol_id = volume['id']
        shareable = volume.get('shareable')
        if shareable and True == shareable:
            LOG.info(_("[CINDER-VRM] shareable"))
            voltype = 'share'
        else:
            voltype = 'normal'
        # backend uuids carry no dashes
        snapshot_uuid = str(snapshot_id).replace('-', '')

        volume_size = int(volume.get('size'))
        snapshot_size = int(snapshot.get('volume_size'))

        # only pass a size when it differs from the snapshot's size
        if volume_size != snapshot_size:
            args_dict['volume_size'] = volume_size

        args_dict['snapshot_uuid'] = snapshot_uuid
        args_dict['volume_name'] = volume['name']
        args_dict['volume_uuid'] = os_vol_id
        args_dict['type'] = voltype

        body = self.volume_snapshot_proxy.create_volume_from_snapshot(
            **args_dict)
        # FC volume id is the last ':'-separated segment of the urn
        vol_urn = body['urn']
        fc_vol_id = vol_urn.split(':')[-1]

        model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
        context = cinder_context.get_admin_context()
        self.db.volume_metadata_update(context, os_vol_id, metadata, False)

        return model_update
+
+ def create_cloned_volume(self, volume, src_volume):
+ LOG.info(_("[BRM-DRIVER] start create_cloned_volume()"))
+ volume['is_thin'] = True
+ LOG.info(_("[BRM-DRIVER] start create_cloned_volume()"))
+ model_update = self.create_volume(volume)
+ dest_volume_size = volume['size']
+ src_volume_size = src_volume['size']
+ if dest_volume_size != src_volume_size:
+ raise exception.InvalidParameterValue(err=_('valid volume size'))
+
+ dest_volume_uri, items = self._vrm_unpack_provider_location(
+ model_update['provider_location'], 'uri')
+ dest_volume_urn, items = self._vrm_unpack_provider_location(
+ model_update['provider_location'], 'urn')
+ src_vol_meta = src_volume.get('volume_metadata')
+ src_vol_meta_dict = metadata_to_dict(src_vol_meta)
+ src_volume_uri = src_vol_meta_dict.get('uri')
+ src_volume_id = src_volume_uri.split('/')[
+ len(src_volume_uri.split('/')) - 1]
+ args_dict = {}
+ args_dict['src_volume_id'] = src_volume_id
+ args_dict['dest_volume_urn'] = dest_volume_urn
+
+ try:
+ self.volume_proxy.clone_volume(**args_dict)
+ except exception.CinderException as ex:
+ volume['provider_location'] = model_update['provider_location']
+ LOG.info(
+ _("[CINDER-BRM] clone_volume exception , delete (%s)") %
+ model_update['provider_location'])
+ self.delete_volume(volume)
+ raise ex
+ return model_update
+
+    def clone_image(self, context, volume, image_location, image_meta,
+                    image_service):
+        '''clone_image
+
+        Create the volume directly from an FC template image when the
+        image qualifies (source type 'template' and not quick-start).
+
+        :param context: request context (unused; an admin context is used)
+        :param volume: cinder volume to populate
+        :param image_location: image location hint (unused here)
+        :param image_meta: glance image metadata dict
+        :param image_service: glance service client (unused here)
+        :return: (model_update, cloned) -- (None, False) when this fast
+            path does not apply and the generic copy path should be used
+        '''
+        LOG.info(_("[BRM-DRIVER] start clone_image "))
+
+        properties = image_meta.get('properties', None)
+
+        # check quick start
+        if properties is not None:
+            quick_start = properties.get('quick_start', None)
+            if quick_start is not None and str(quick_start).lower() == 'true':
+                LOG.info(_("[BRM-DRIVER] image has quick start property"))
+                # update quick_start in admin metadata
+                admin_context = cinder_context.get_admin_context()
+                self.db.volume_admin_metadata_update(admin_context,
+                                                     volume.get('id'),
+                                                     {'quick_start': 'True'},
+                                                     False)
+                # Quick-start images are handled by copy_image_to_volume.
+                return None, False
+
+        # Only images whose source type is 'template' take this path.
+        if properties is None:
+            return None, False
+        elif 'template' != properties.get('__image_source_type', None):
+            LOG.info(_("[BRM-DRIVER] image_type is not template"))
+            return None, False
+        else:
+            LOG.info(_("[BRM-DRIVER] image_type is template"))
+
+        image_type = 'template'
+
+        context = cinder_context.get_admin_context()
+        args_dict = {}
+        vol_size = int(volume.get('size'))
+        image_id = image_meta['id']
+        # Reject size/OS combinations FC cannot provision (XP / 2003).
+        self._check_image_os(image_id, image_meta, vol_size)
+
+        args_dict['image_id'] = image_id
+        os_vol_id = volume.get('id')
+        args_dict['volume_id'] = os_vol_id
+        args_dict['volume_size'] = vol_size
+
+        # 'linked_clone' volume metadata chooses linked clone vs full copy;
+        # anything other than the string 'true' means a full copy.
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+        linked_clone = vol_meta_dict.get('linked_clone')
+        if linked_clone is None:
+            linked_clone = False
+        elif str(linked_clone).lower() == 'true':
+            linked_clone = True
+        else:
+            linked_clone = False
+
+        hw_image_location = properties.get('__image_location', None)
+        if hw_image_location is None or hw_image_location == "":
+            msg = _('[BRM-DRIVER] hw_image_location is null')
+            LOG.error(msg)
+            raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+        args_dict['image_location'] = hw_image_location
+        LOG.info(_('[BRM-DRIVER] image_location is %s') % args_dict[
+            'image_location'])
+
+        args_dict['image_type'] = image_type
+
+        # The datastore name is the pool part of the host string
+        # ('host@backend#pool').
+        ds_meta = {}
+        try:
+            ds_meta['ds_name'] = volume.get('host').split('#')[1]
+        except exception.CinderException as ex:
+            raise ex
+
+        datastore = self._choose_datastore(ds_meta)
+        if not datastore:
+            LOG.info(_("[CINDER-VRM] datastore [%s],"), datastore)
+            raise exception.InvalidParameterValue(err=_('invalid datastore'))
+
+        args_dict['ds_urn'] = datastore.get('urn')
+        is_thin = FC_DRIVER_CONF.vrm_is_thin
+        is_thin = self.check_thin(datastore, is_thin)
+        args_dict['is_thin'] = is_thin
+
+        args_dict['cluster_urn'] = self._get_cluster_by_dsurn(
+            datastore.get('urn'))
+        LOG.info(_("[BRM-DRIVER] cluster_urn [%s]") % args_dict['cluster_urn'])
+
+        if args_dict.get('volume_sequence_num') is None:
+            args_dict['volume_sequence_num'] = 1
+
+        LOG.info(_("[BRM-DRIVER] %s image_type is template ") % image_id)
+        if linked_clone:
+            urn = self.vm_proxy.create_linkclone_from_template(**args_dict)
+        else:
+            urn = self.vm_proxy.create_volume_from_template(**args_dict)
+
+        # The FC volume id is the last ':'-separated token of the urn.
+        temp_str = str(urn)
+        fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+
+        # Shareable cinder volumes are switched to 'share' type on FC.
+        share = volume.get('shareable')
+        LOG.info('[BRM-DRIVER] shareable [%s]', share)
+        if str(share).lower() == 'true':
+            try:
+                self.volume_proxy.modify_volume(volume_id=fc_vol_id,
+                                                type='share')
+            except Exception as ex:
+                LOG.error(_("modify volume to share is failed "))
+                self.delete_volume(volume)
+                raise ex
+
+        model_update, metadata = self._vrm_get_volume_meta(fc_vol_id)
+        self.db.volume_metadata_update(context, os_vol_id, metadata, False)
+        return model_update, True
+
+    def copy_image_to_volume(self, context, volume, image_service, image_id):
+        '''copy_image_to_volume
+
+        Populate an existing FC volume with image data. Supported image
+        source types: template, nfs, uds, glance (default).
+
+        :param context: request context (auth token is forwarded to FC)
+        :param volume: cinder volume to populate
+        :param image_service: glance service client
+        :param image_id: glance image id
+        :return: None (volume metadata is updated in the cinder DB)
+        '''
+        LOG.info(_("[BRM-DRIVER] start copy_image_to_volume [%s]") % image_id)
+        image_meta = image_service.show(context, image_id)
+        args_dict = {}
+        vol_size = int(volume.get('size'))
+        # Reject size/OS combinations FC cannot provision (XP / 2003).
+        self._check_image_os(image_id, image_meta, vol_size)
+
+        args_dict['image_id'] = image_id
+
+        # Quick-start volumes must carry the image's min_disk size.
+        min_disk = image_meta.get('min_disk')
+        if self._check_quick_start(volume.get('id')):
+            args_dict['quick_start'] = True
+            if min_disk:
+                args_dict['image_size'] = int(min_disk)
+            else:
+                msg = _('[BRM-DRIVER] image min_disk is none when create from '
+                        'quick start image')
+                LOG.error(msg)
+                raise exception.VolumeDriverException(message=msg)
+
+        os_vol_id = volume.get('id')
+        args_dict['volume_id'] = os_vol_id
+        args_dict['volume_size'] = vol_size
+        args_dict['is_thin'] = FC_DRIVER_CONF.vrm_is_thin
+
+        # 'linked_clone' volume metadata ('true'/other) selects the linked
+        # clone path; quick-start volumes force it on below.
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+        linked_clone = vol_meta_dict.get('linked_clone')
+        args_dict['volume_urn'] = vol_meta_dict.get('urn')
+        if linked_clone is None:
+            linked_clone = False
+        elif str(linked_clone).lower() == 'true':
+            linked_clone = True
+        else:
+            linked_clone = False
+
+        # check quick start
+        if not linked_clone and self._check_quick_start(volume.get('id')):
+            linked_clone = True
+        properties = image_meta.get('properties', None)
+        if properties is None or properties == "":
+            image_type = 'glance'
+        else:
+            args_dict['volume_sequence_num'] = properties.get('__sequence_num')
+            image_type = properties.get('__image_source_type', None)
+            types = ['template', 'nfs', 'uds', 'glance']
+
+            if image_type is not None and image_type not in types:
+                msg = _('[BRM-DRIVER] image type is not support ')
+                LOG.error(msg)
+                raise exception.ImageUnacceptable(image_id=image_id,
+                                                  reason=msg)
+
+            if image_type is None:
+                image_type = 'glance'
+
+            # Non-glance images must carry an explicit FC-side location.
+            if image_type != 'glance':
+                hw_image_location = properties.get('__image_location', None)
+                if hw_image_location is None or hw_image_location == "":
+                    msg = _('[BRM-DRIVER] hw_image_location is null')
+                    LOG.error(msg)
+                    raise exception.ImageUnacceptable(image_id=image_id,
+                                                      reason=msg)
+
+                args_dict['image_location'] = hw_image_location
+                LOG.info(_('[BRM-DRIVER] image_location is %s') % args_dict[
+                    'image_location'])
+
+        args_dict['image_type'] = image_type
+        # The datastore name is the pool part of the host string
+        # ('host@backend#pool').
+        ds_meta = {}
+        try:
+            ds_meta['ds_name'] = volume.get('host').split('#')[1]
+        except exception.CinderException as ex:
+            raise ex
+
+        datastore = self._choose_datastore(ds_meta)
+        if not datastore:
+            LOG.info(_("[CINDER-VRM] datastore [%s],"), datastore)
+            raise exception.InvalidParameterValue(err=_('found no datastore'))
+
+        args_dict['ds_urn'] = datastore.get('urn')
+        is_thin = FC_DRIVER_CONF.vrm_is_thin
+        is_thin = self.check_thin(datastore, is_thin)
+        # Linked clones require a thin-capable datastore.
+        if linked_clone:
+            if not is_thin:
+                LOG.error(_("[CINDER-VRM] datastore does support linedclone"))
+                raise exception.InvalidParameterValue(
+                    err=_('datastore does support linedclone'))
+        args_dict['is_thin'] = is_thin
+
+        args_dict['cluster_urn'] = self._get_cluster_by_dsurn(
+            datastore.get('urn'))
+        LOG.info(
+            _("[BRM-DRIVER] self.cluster_urn [%s]") % args_dict['cluster_urn'])
+
+        args_dict['auth_token'] = context.auth_token
+
+        if args_dict.get('volume_sequence_num') is None:
+            args_dict['volume_sequence_num'] = 1
+
+        if linked_clone:
+            urn = self.vm_proxy.create_linkClone_from_extend(**args_dict)
+        else:
+            # For full copies the FC volume is first reset to 'normal' type.
+            try:
+                temp_str = str(vol_meta_dict.get('urn'))
+                fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+                self.volume_proxy.modify_volume(volume_id=fc_vol_id,
+                                                type='normal')
+            except Exception as ex:
+                LOG.error(_("modify volume to normal is failed "))
+                self.delete_volume(volume)
+                raise ex
+            urn = self.vm_proxy.create_volume_from_extend(**args_dict)
+
+        # The FC volume id is the last ':'-separated token of the urn.
+        temp_str = str(urn)
+        fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+        model_update, metadata = self._vrm_get_volume_meta(fc_vol_id)
+
+        # Shareable cinder volumes are switched to 'share' type on FC.
+        share = volume.get('shareable')
+        LOG.info('[BRM-DRIVER] shareable [%s]', share)
+        if str(share).lower() == 'true':
+            try:
+                self.volume_proxy.modify_volume(volume_id=fc_vol_id,
+                                                type='share')
+            except Exception as ex:
+                LOG.error(_("modify volume to share is failed "))
+                self.delete_volume(volume)
+                raise ex
+
+        self.db.volume_metadata_update(context, os_vol_id, metadata, False)
+
+    def _check_image_os(self, image_id, image_meta, vol_size):
+        '''Reject volume sizes FC cannot provision for legacy Windows images.
+
+        For images with min_disk < 4 GB whose OS is Windows XP or Windows
+        Server 2003, the volume size must equal min_disk exactly.
+
+        :param image_id: glance image id (used for the error message)
+        :param image_meta: glance image metadata dict
+        :param vol_size: requested volume size in GB
+        :raises ImageUnacceptable: when the size/OS combination is invalid
+        '''
+        min_disk = image_meta.get('min_disk')
+        if min_disk:
+            min_disk = int(min_disk)
+            if min_disk < 4 and vol_size != min_disk:
+                prop = image_meta.get('properties')
+                if prop:
+                    os_type = prop.get('__os_type')
+                    os_version = prop.get('__os_version')
+                    if os_type and os_version:
+                        if os_type == 'Windows' and \
+                                (os_version.startswith(
+                                    'Windows XP') or os_version.startswith(
+                                    'Windows Server 2003')):
+                            msg = _(
+                                "[BRM-DRIVER] volume size must equal image "
+                                "min disk size while image min disk size is "
+                                "smaller than 4G and os is Windows XP or "
+                                "Windows Server 2003")
+                            LOG.error(msg)
+                            raise exception.ImageUnacceptable(
+                                image_id=image_id, reason=msg)
+
+    def _generate_image_metadata(self, min_disk, location,
+                                 volume_sequence_num, os_option, instance):
+        """Build the glance metadata dict for an exported image.
+
+        :param min_disk: minimum disk size (GB) to record on the image
+        :param location: image location string, or None for empty
+        :param volume_sequence_num: sequence number; defaults to 1 when None
+        :param os_option: optional dict carrying '__os_type'/'__os_version'
+        :param instance: unused
+        :return: dict with 'properties' and 'min_disk' keys
+        """
+        if volume_sequence_num is None:
+            LOG.info(_("volume_sequence_num is None"))
+            volume_sequence_num = 1
+        metadata = {'__image_location': location or '',
+                    '__image_source_type': FC_DRIVER_CONF.export_image_type,
+                    '__sequence_num': volume_sequence_num}
+        # Copy OS identification through only when present.
+        if os_option is not None:
+            if os_option.get('__os_version') is not None:
+                metadata['__os_version'] = os_option.get('__os_version')
+
+            if os_option.get('__os_type') is not None:
+                metadata['__os_type'] = os_option.get('__os_type')
+
+        LOG.info(_("image metadata is: %s"), json.dumps(metadata))
+        return {'properties': metadata, 'min_disk': min_disk}
+
+    def _generate_image_location(self, image_id, context):
+        """_generate_image_location
+
+        generate image location: '172.17.1.30:/image/base/uuid/uuid.ovf'
+        Depends on FC_DRIVER_CONF.export_image_type:
+        - 'nfs': '<fc_image_path>/<image_id>/<image_id>.<xml|ovf>'
+        - 'uds': '<ip>:<port>:<bucket>:<image_id>'
+        - otherwise: None.  Also returns None implicitly when the nfs
+          path or uds settings are not configured.
+        :param image_id: glance image id
+        :param context: request context (project id used for wildcard
+            uds buckets)
+        :return: location string or None
+        """
+        if FC_DRIVER_CONF.export_image_type == 'nfs':
+            fc_image_path = FC_DRIVER_CONF.fc_image_path
+            if fc_image_path:
+                # v1.2 exports use the legacy 'xml' descriptor format.
+                format = 'xml' if FC_DRIVER_CONF.export_version == 'v1.2' else\
+                    'ovf'
+                return '%s/%s/%s.%s' % (fc_image_path, image_id, image_id,
+                                        format)
+            else:
+                LOG.info(_("fc_image_path is null"))
+        elif FC_DRIVER_CONF.export_image_type == 'uds':
+            if FC_DRIVER_CONF.uds_ip is not None and FC_DRIVER_CONF.uds_port\
+                    is not None and FC_DRIVER_CONF.uds_bucket_name is not None:
+                uds_bucket_name = FC_DRIVER_CONF.uds_bucket_name
+                bucket_type = FC_DRIVER_CONF.uds_bucket_type
+
+                LOG.info(str(bucket_type))
+                LOG.info(context.tenant)
+
+                if bucket_type is not None:
+                    # 'wildcard' buckets are suffixed with the project id.
+                    if str(bucket_type) == 'wildcard':
+                        project_tmp_id = context.to_dict()['project_id']
+                        LOG.info(project_tmp_id)
+                        if project_tmp_id is None:
+                            LOG.error(_("project_id is none "))
+                            raise exception.ParameterNotFound(
+                                param='project_id is none')
+                        else:
+                            uds_bucket_name += project_tmp_id
+                ret = '%s:%s:%s:%s' % (
+                    FC_DRIVER_CONF.uds_ip, FC_DRIVER_CONF.uds_port,
+                    uds_bucket_name,
+                    image_id)
+                return ret
+        else:
+            return None
+
+    def copy_volume_to_image(self, context, volume, image_service, image_meta):
+        '''copy_volume_to_image
+
+        Export an FC volume into a glance image. On export failure the
+        half-created image is deleted and the exception is re-raised.
+
+        :param context: request context
+        :param volume: cinder volume to export
+        :param image_service: glance service client
+        :param image_meta: metadata of the target image (its 'id' is used)
+        :return: None (the image is updated through image_service)
+        '''
+        LOG.info(_("[BRM-DRIVER] start copy_volume_to_image() "))
+
+        fc_image_path = FC_DRIVER_CONF.fc_image_path
+        image_id = image_meta.get('id')
+        # Strip any path prefix from the image id.
+        if '/' in str(image_id):
+            image_id = image_id.split('/')[-1]
+
+        # Stamp source-type properties early so the image is identifiable
+        # even before the export finishes.
+        metadata = {'__image_location': '',
+                    '__image_source_type': FC_DRIVER_CONF.export_image_type}
+        image_property = {'properties': metadata}
+        image_service.update(context, image_id, image_property,
+                             purge_props=False)
+
+        volume_id = volume.get('id')
+        vol_size = int(volume.get('size'))
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+
+        # Image metadata on the volume is optional; failures are tolerated.
+        vol_image_meta = None
+        try:
+            vol_image_meta = self.volume_api.get_volume_image_metadata(
+                context, volume)
+        except exception.CinderException as ex:
+            LOG.error(_('[BRM-DRIVER] get_volume_image_metadata is error'))
+
+        vol_image_meta_dic = None
+        if vol_image_meta:
+            # NOTE(review): dict.iteritems() exists only on Python 2; this
+            # line would raise AttributeError on Python 3.
+            vol_image_meta_dic = dict(vol_image_meta.iteritems())
+
+        args_dict = {}
+        args_dict['volume_id'] = volume_id
+        args_dict['volume_size'] = vol_size
+        args_dict['image_id'] = image_id
+        args_dict['image_url'] = fc_image_path
+        args_dict['project_id'] = context.to_dict()['project_id']
+
+        share = volume.get('shareable')
+        LOG.info('[BRM-DRIVER] shareable [%s]', share)
+        if str(share).lower() == 'false':
+            args_dict['shareable'] = 'normal'
+        else:
+            args_dict['shareable'] = 'share'
+
+        if vol_meta_dict.get('urn') is None:
+            msg = _('[BRM-DRIVER] urn is null')
+            LOG.error(msg)
+            raise exception.InvalidVolumeMetadata(reason=msg)
+
+        args_dict['volume_urn'] = str(vol_meta_dict.get('urn'))
+        LOG.info(_("volume_urn is %s") % args_dict['volume_urn'])
+
+        if self.SHARED_HOSTS is None or len(self.SHARED_HOSTS) == 0:
+            msg = _('[BRM-DRIVER] SHARED_HOSTS is none')
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
+        # FC volume id is the last ':'-separated token of the urn.
+        fc_vol_id = \
+            args_dict['volume_urn'][args_dict['volume_urn'].rfind(':') + 1:]
+        volume_body = self.volume_proxy.query_volume(id=fc_vol_id)
+        source_ds_urn = volume_body['datastoreUrn']
+        LOG.info(_("[BRM-DRIVER] datastoreUrn [%s]") % source_ds_urn)
+        args_dict['cluster_urn'] = self._get_cluster_by_dsurn(source_ds_urn)
+        LOG.info(
+            _("[BRM-DRIVER] cluster_urn [%s]") % args_dict['cluster_urn'])
+
+        # args_dict['cluster_urn'] = self.cluster_urn
+        name = image_service.show(context, image_id).get('name')
+        args_dict['image_type'] = FC_DRIVER_CONF.export_image_type
+        args_dict['auth_token'] = context.auth_token
+
+        # Record the final location before triggering the export.
+        location = self._generate_image_location(image_id, context)
+        metadata = {'__image_location': location or '',
+                    '__image_source_type': FC_DRIVER_CONF.export_image_type}
+        image_property = {'properties': metadata}
+        image_service.update(context, image_id, image_property,
+                             purge_props=False)
+
+        try:
+            LOG.info(_('location %s ') % location)
+            volume_sequence_num = self.vm_proxy.export_volume_to_image(
+                **args_dict)
+        except Exception as ex:
+            # Remove the partially-created image on export failure.
+            LOG.info(_("[BRM-DRIVER] deletedelete image id:[%s]") % image_id)
+            image_service.delete(context, image_id)
+            raise ex
+
+        metadata = self._generate_image_metadata(vol_size, location,
+                                                 volume_sequence_num,
+                                                 vol_image_meta_dic, None)
+        if 'glance' != args_dict.get('image_type'):
+            # NOTE(review): hard-coded data path looks like leftover
+            # debug/test code -- confirm this upload is intentional.
+            image_service.update(context, image_id, {},
+                                 data='/home/vhd/G1-1.vhd')
+        image_service.update(context, image_id, metadata, purge_props=False)
+        LOG.info(_('image %s create success') % name)
+
+    def attach_volume(self, context, volume_id, instance_uuid,
+                      host_name_sanitized, mountpoint):
+        '''attach_volume
+
+        No-op for this driver: attachment is handled on the FC side.
+
+        :param context:
+        :param volume_id:
+        :param instance_uuid:
+        :param host_name_sanitized:
+        :param mountpoint:
+        :return: None
+        '''
+        pass
+
+    def detach_volume(self, context, volume):
+        '''detach_volume
+
+        No-op for this driver: detachment is handled on the FC side.
+
+        :param context:
+        :param volume:
+        :return: None
+        '''
+        pass
+
+    def initialize_connection(self, volume, connector):
+        '''initialize_connection
+
+        Return connection info carrying the FC volume urn from the
+        volume's metadata.
+
+        :param volume: cinder volume being connected
+        :param connector: connector properties (unused)
+        :return: dict with an empty 'data' dict and a 'vol_urn' key
+        '''
+        LOG.info(_("[BRM-DRIVER] start initialize_connection() "))
+        connection = {'data': {}}
+        LOG.info('volume: %s', volume)
+        admin_context = cinder_context.get_admin_context()
+        vol_meta = self.db.volume_metadata_get(admin_context, volume['id'])
+        connection['vol_urn'] = vol_meta['urn']
+        return connection
+
+    def terminate_connection(self, volume, connector, **kwargs):
+        '''terminate_connection
+
+        No-op for this driver.
+
+        :param volume:
+        :param connector:
+        :param kwargs: e.g. force
+        :return: None
+        '''
+        pass
+
+    def retype(self, context, volume, new_type, diff, host):
+        '''retype
+
+        Change a volume's type, migrating it to another datastore when
+        the new type targets a different backend pool.
+
+        :param context: request context
+        :param volume: cinder volume to retype
+        :param new_type: the target volume type dict
+        :param diff: extra-specs difference dict
+        :param host: target host dict ('host' key carries the pool)
+        :return: (retyped, model_update) -- (True, None) on success or
+            no-op, (False, None) when no destination datastore is found
+        '''
+        LOG.info(_("[BRM-DRIVER] start retype() "))
+        LOG.info(_(" new volume type [%s]"), new_type)
+
+        # migrate_type 1 = thin migration, 2 = thick migration.
+        args_dict = {}
+        is_thin = FC_DRIVER_CONF.vrm_is_thin
+        if str(is_thin).lower() == 'true':
+            args_dict['migrate_type'] = 1
+        else:
+            args_dict['migrate_type'] = 2
+        shareable = volume.get('shareable')
+        if shareable is True:
+            LOG.info(_("[BRM-DRIVER] shareable"))
+
+        # RDM (pass-through) volumes can only be retyped within RDM types.
+        if diff and diff.get('extra_specs'):
+            pvscsi_support = diff.get('extra_specs').get('hw:passthrough')
+            if pvscsi_support and pvscsi_support[0] != pvscsi_support[1]:
+                msg = (_('rdm volume can only retyped to rdm volume type.'))
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+
+        # Without a target backend name there is nothing to migrate.
+        extra_specs = new_type.get('extra_specs')
+        if extra_specs is None:
+            LOG.info(_("[BRM-DRIVER] extra_specs is None"))
+            return True, None
+        new_backend_name = extra_specs.get('volume_backend_name')
+        if new_backend_name is None:
+            LOG.info(_("[BRM-DRIVER] new_backend_name is None"))
+            return True, None
+
+        # Linked-clone volumes cannot be migrated.
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+        linked_clone = vol_meta_dict.get('linked_clone')
+        if linked_clone is not None:
+            if str(linked_clone).lower() == 'true':
+                msg = (_('linked volume can not be retyped. '))
+                LOG.error(msg)
+                raise exception.InvalidInput(reason=msg)
+
+        # Volumes with snapshots cannot be migrated.
+        snapshots = self.db.snapshot_get_all_for_volume(context,
+                                                        volume.get('id'))
+        if len(snapshots):
+            msg = _("Volume still has %d dependent snapshots") % len(snapshots)
+            raise exception.InvalidVolume(reason=msg)
+
+        # Source pool comes from the volume's host string
+        # ('host@backend#pool').
+        try:
+            source_ds_name = volume.get('host').split('#')[1]
+        except exception.CinderException as ex:
+            LOG.info(_("[CINDER-BRM] host format exception, host is %s ") %
+                     volume.get('host'))
+            raise ex
+
+        LOG.info(_(" source_ds_name [%s]"), source_ds_name)
+
+        try:
+            new_ds_name = volume_utils.extract_host(host['host'], 'pool')
+        except exception.CinderException as ex:
+            LOG.info(
+                _("[CINDER-BRM] host format exception, host is %s ") %
+                volume.get('host'))
+            raise ex
+
+        LOG.info(_(" new_ds_name [%s]"), new_ds_name)
+
+        if source_ds_name == new_ds_name:
+            LOG.info(_("[CINDER-BRM] source ds_name == dest ds_name"))
+            return True, None
+
+        # Resolve the destination datastore urn by name.
+        datastores = self.datastore_proxy.list_datastore()
+        for datastore in datastores:
+            ds_name = datastore.get('name')
+            if ds_name is not None:
+                LOG.info(_(" ds_name [%s]"), ds_name)
+                if new_ds_name == ds_name:
+                    args_dict['dest_ds_urn'] = datastore.get('urn')
+                    LOG.info(_(" new_ds_name [%s]"), new_ds_name)
+                    break
+
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+        volume_urn = vol_meta_dict.get('urn')
+        args_dict['volume_urn'] = volume_urn
+        fc_vol_id = volume_urn[volume_urn.rfind(':') + 1:]
+        volume_body = self.volume_proxy.query_volume(id=fc_vol_id)
+        source_ds_urn = volume_body['datastoreUrn']
+
+        args_dict['volume_id'] = fc_vol_id
+        args_dict['speed'] = 30
+
+        if None == args_dict.get('dest_ds_urn'):
+            LOG.info(_("[BRM-DRIVER] no dest_ds_urn"))
+            return False, None
+        else:
+            if source_ds_urn == args_dict['dest_ds_urn']:
+                LOG.info(_("[BRM-DRIVER] same ds [%s]"), source_ds_urn)
+                return True, None
+            # Attached volumes migrate via the owning VM; detached ones
+            # migrate directly.
+            vm_body = self.vm_proxy.query_vm_volume(**args_dict)
+            self._check_replications(fc_vol_id)
+            if vm_body is None:
+                self.volume_proxy.migrate_volume(**args_dict)
+            else:
+                vm_urn = vm_body.get('urn')
+                vm_id = vm_urn[-10:]
+                args_dict['vm_id'] = vm_id
+                self.vm_proxy.migrate_vm_volume(**args_dict)
+
+        # Refresh the cinder-side metadata after the migration.
+        LOG.info(_("[CINDER-VRM] fc_vol_id [%s] ") % fc_vol_id)
+        model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
+        self.db.volume_metadata_update(context, volume['id'], metadata, False)
+        LOG.info(_("[BRM-DRIVER] retype return"))
+        return True, None
+
+    def manage_existing(self, volume, existing_ref):
+        """Brings an existing backend storage object under Cinder management.
+
+        existing_ref is passed straight through from the API request's
+        manage_existing_ref value, and it is up to the driver how this should
+        be interpreted. It should be sufficient to identify a storage object
+        that the driver should somehow associate with the newly-created cinder
+        volume structure.
+
+        The FC volume to adopt is identified by the 'volInfoUrl' entry of
+        the volume's metadata; existing_ref entries are stored as glance
+        metadata on the managed volume.
+        """
+
+        LOG.info(_("[BRM-DRIVER] start manage_existing() "))
+        metadata = dict(
+            (item['key'], item['value']) for item in volume['volume_metadata'])
+        volInfoUrl = metadata.get('volInfoUrl', None)
+        if volInfoUrl is None:
+            LOG.info(_("manage_existing: volInfoUrl is None"))
+            raise driver_exception.FusionComputeDriverException()
+
+        name = volume['name']
+        uuid = volume['id']
+        # Shareable cinder volumes map to FC type 'share'.
+        shareable = volume.get('shareable')
+        if shareable is True:
+            voltype = 'share'
+        else:
+            voltype = 'normal'
+
+        quantity_GB = int(volume['size'])
+
+        args_dict = {}
+        args_dict['name'] = name
+        args_dict['quantityGB'] = quantity_GB
+        args_dict['type'] = voltype
+        args_dict['volInfoUrl'] = volInfoUrl
+        args_dict['uuid'] = uuid
+
+        # QoS limits are disabled (0 = unlimited) for managed volumes.
+        args_dict['maxReadBytes'] = 0
+        args_dict['maxWriteBytes'] = 0
+        args_dict['maxReadRequest'] = 0
+        args_dict['maxWriteRequest'] = 0
+        body = self.volume_proxy.manage_existing(**args_dict)
+
+        model_update = {}
+        # FC volume id is the last ':'-separated token of the urn.
+        temp_str = str(body.get('urn'))
+        fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+        model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
+        context = cinder_context.get_admin_context()
+        self.db.volume_metadata_update(context, uuid, metadata, False)
+        if existing_ref:
+            LOG.info("manage_existing: existing_ref[%s]", existing_ref)
+            for key, value in existing_ref.items():
+                self.db.volume_glance_metadata_create(context,
+                                                      uuid,
+                                                      key, value)
+
+        return model_update
+
+    def manage_existing_get_size(self, volume, existing_ref):
+        """Return size of volume to be managed by manage_existing.
+
+        When calculating the size, round up to the next GB.
+
+        The size is read from the 'quantityGB' entry of the volume's
+        metadata (0 when absent).  NOTE: also mutates volume['size'] as
+        a side effect.
+        """
+        vol_size = 0
+        for meta in volume['volume_metadata']:
+            LOG.error("meta: %s" % str(meta))
+            if meta.key == 'quantityGB':
+                vol_size = int(meta.value)
+                break
+
+        volume['size'] = vol_size
+
+        return vol_size
+
+    def unmanage(self, volume):
+        """Removes the specified volume from Cinder management.
+
+        Does not delete the underlying backend storage object.
+
+        For most drivers, this will not need to do anything. However, some
+        drivers might use this call as an opportunity to clean up any
+        Cinder-specific configuration that they have associated with the
+        backend storage object.
+
+        :param volume: cinder volume; its provider_location carries the
+            FC volume uri
+        :return: None (silently returns when provider_location is empty)
+        """
+
+        LOG.info(_("[BRM-DRIVER] start unmanage() "))
+
+        provider_location = volume['provider_location']
+        if provider_location is None or provider_location == '':
+            LOG.error(_("[BRM-DRIVER]provider_location is null "))
+            return
+        volume_uri, items = \
+            self._vrm_unpack_provider_location(volume['provider_location'],
+                                               'uri')
+        self.volume_proxy.unmanage(volume_uri=volume_uri)
+
+    def migrate_volume(self, context, volume, host):
+        '''migrate_volume
+
+        Not supported by this driver; migration happens through retype().
+
+        :param context:
+        :param volume:
+        :param host:
+        :raises NotImplementedError: always
+        '''
+        LOG.info(_("[BRM-DRIVER] start migrate_volume() "))
+
+        raise NotImplementedError()
+
+    def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
+        '''copy_volume_data
+
+        Not supported by this driver.
+
+        :param context:
+        :param src_vol:
+        :param dest_vol:
+        :param remote:
+        :raises NotImplementedError: always
+        '''
+        msg = (_('copy_volume_data. '))
+        LOG.error(msg)
+        raise NotImplementedError()
+
+    def extend_volume(self, volume, new_size):
+        '''extend_volume
+
+        Grow the FC volume to new_size (GB) after checking replication
+        state and datastore capacity; skips the resize when the backend
+        volume is already at least new_size.
+
+        :param volume: cinder volume whose metadata carries 'uri'/'urn'
+        :param new_size: target size in GB
+        :raises ExtendVolumeError: when the uri is missing or capacity
+            checks fail
+        '''
+        LOG.info(_("[BRM-DRIVER] start extend_volume() "))
+        vol_meta = volume.get('volume_metadata')
+        vol_meta_dict = metadata_to_dict(vol_meta)
+        vol_uri = vol_meta_dict.get('uri')
+        fc_vol_urn = vol_meta_dict.get('urn')
+        if fc_vol_urn is not None:
+            fc_vol_id = fc_vol_urn[fc_vol_urn.rfind(':') + 1:]
+            # Replicated volumes must not be resized.
+            self._check_replications(fc_vol_id)
+
+            LOG.info(_("[CINDER-VRM]extend_volume fc_vol_id [%s] ") % fc_vol_id)
+
+        if vol_uri is not None:
+            temp_str = str(vol_meta_dict.get('urn'))
+            fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+            vrm_volume = self.volume_proxy.query_volume(id=fc_vol_id)
+            sizeGB = vrm_volume.get('quantityGB')
+            if int(sizeGB) < int(new_size):
+                self._check_datastore_capacity(vrm_volume, volume['size'],
+                                               new_size)
+                # FC expects the size in MB.
+                self.volume_proxy.extend_volume(volume_uri=vol_uri,
+                                                size=new_size * 1024)
+            else:
+                LOG.info(_("[CINDER-VRM]sizeGB[%s] avoid extend") % sizeGB)
+        else:
+            raise exception.ExtendVolumeError
+
+        # NOTE(review): if 'urn' metadata is missing, fc_vol_urn is None and
+        # the slice below raises TypeError -- confirm 'urn' is guaranteed
+        # to exist whenever 'uri' does.
+        fc_vol_id = fc_vol_urn[fc_vol_urn.rfind(':') + 1:]
+        model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
+        context = cinder_context.get_admin_context()
+        self.db.volume_metadata_update(context, volume['id'], metadata, False)
+
+    def _check_datastore_capacity(self, fc_volume, old_size, new_size):
+        '''Verify the volume's datastore can absorb an extend operation.
+
+        Keeps the same strategy as the scheduler's capacity_filter:
+        reserved space is subtracted from free space, and for thin
+        provisioning the over-subscription ratio is enforced instead.
+
+        :param fc_volume: FC volume body (carries 'datastoreUrn')
+        :param old_size: current size in GB
+        :param new_size: requested size in GB
+        :return: True on the thin-provisioning success path; None otherwise
+        :raises ExtendVolumeError: when the datastore is abnormal or lacks
+            capacity
+        '''
+        datastore_urn = fc_volume.get('datastoreUrn')
+        extra_size = new_size - old_size
+        if datastore_urn:
+            datastore_id = datastore_urn[datastore_urn.rfind(':') + 1:]
+            datastore = self.datastore_proxy.query_datastore(id=datastore_id)
+
+            if 'NORMAL' != datastore['status']:
+                msg = _('Datastore status is abnormal.')
+                raise exception.ExtendVolumeError(message=msg)
+
+            # Free space after subtracting the reserved percentage.
+            reserved = float(self.reserved_percentage) / 100
+            total = float(datastore['capacityGB'])
+            free_space = datastore['freeSizeGB']
+            free = free_space - math.floor(total * reserved)
+
+            if self.thin_provision and self.over_ratio >= 1:
+                # Thin path: enforce the over-subscription ratio.
+                provisioned_ratio = ((datastore['usedSizeGB'] +
+                                      extra_size) / total)
+                if provisioned_ratio >= self.over_ratio:
+                    LOG.warning(_(
+                        "Insufficient free space for thin provisioning. "
+                        "The ratio of provisioned capacity over total "
+                        "capacity "
+                        "%(provisioned_ratio).2f has exceeded the maximum "
+                        "over "
+                        "subscription ratio %(oversub_ratio).2f."),
+                        {"provisioned_ratio": provisioned_ratio,
+                         "oversub_ratio": self.over_ratio})
+                    msg = _('Insufficient free space for thin provisioning.')
+                    raise exception.ExtendVolumeError(message=msg)
+                else:
+                    # Virtual free space scales with the over ratio.
+                    free_virtual = free * self.over_ratio
+                    if free_virtual < extra_size:
+                        msg = _(
+                            'Insufficient free space for thin provisioning.')
+                        raise exception.ExtendVolumeError(message=msg)
+                    else:
+                        return True
+
+            # Thick path: the extra size must fit into actual free space.
+            if free < extra_size:
+                msg = _(
+                    'The new size of the volume exceed the capacity of the '
+                    'datastore.')
+                raise exception.ExtendVolumeError(message=msg)
+
+ def backup_volume(self, context, backup, backup_service):
+ """Create a new backup from an existing volume.
+
+ backup['status']
+ backup['object_count']
+ backup['_sa_instance_state']
+ backup['user_id']
+ backup['service']:q
+
+ backup['availability_zone']
+ backup['deleted']
+ backup['created_at']
+ backup['updated_at']
+ backup['display_description']
+ backup['project_id']
+ backup['host']
+ backup['container']
+ backup['volume_id']
+ backup['display_name']
+ backup['fail_reason']
+ backup['deleted_at']
+ backup['service_metadata']
+ backup['id']
+ backup['size']
+ -------- -------- -------- --------
+ backup['backup_type']
+ backup['volume_name']
+ backup['snap_name']
+ backup['snap_id']
+ backup['snap_parent_name']
+ backup['snap_last_name']
+ backup['clone_volume_name']
+ backup['storage_ip']
+ backup['storage_pool_id']
+ backup['volume_offset']
+ backup['incremental']
+ backup['is_close_volume']
+ backup['is_bootable']
+ backup['image_id']
+
+ backup['volume_size']
+ volume_file
+ backup_metadata
+
+backup db:CREATED_AT | UPDATED_AT | DELETED_AT | DELETED
+| ID | VOLUME_ID | USER_ID | PROJECT_ID | HOST | AVAILABILITY_ZONE
+| DISPLAY_NAME | DISPLAY_DESCRIPTION | CONTAINER | STATUS | FAIL_REASON
+| SERVICE_METADATA | SERVICE | SIZE | OBJECT_COUNT
+
+
+snapshot db: CREATED_AT|UPDATED_AT|DELETED_AT| DELETED |ID |VOLUME_ID|USER_ID
+ |PROJECT_ID| STATUS | PROGRESS | VOLUME_SIZE | SCHEDULED_AT | DISPLAY_NAME
+ | DISPLAY_DESCRIPTION | PROVIDER_LOCATION | ENCRYPTION_KEY_ID |
+ VOLUME_TYPE_ID | CGSNAPSHOT_ID
+
+ """
+ vol_second_os = self.db.volume_get(context, backup['volume_id'])
+
+ LOG.info(
+ ('Creating a new backup for volume %s.') % vol_second_os['name'])
+ volume_file = {}
+ if vol_second_os.get('snapshot_id') != backup['container']:
+ LOG.error(
+ _("snapshot id is %s") % vol_second_os.get('snapshot_id'))
+ LOG.error(_("backup container is %s") % backup['container'])
+ raise exception.InvalidSnapshot(
+ reason="snapshot id not equal backup container")
+
+ vol_meta = vol_second_os.get('volume_metadata')
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ volume_urn = vol_meta_dict.get('urn')
+ fc_vol_id = volume_urn[volume_urn.rfind(':') + 1:]
+ vol_second_fc = self.volume_proxy.query_volume(id=fc_vol_id)
+
+ volume_file['volume_name'] = vol_second_fc['volNameOnDev']
+ last_snap_id_os = vol_second_os['snapshot_id']
+ last_snap_id_fc = str(vol_second_os['snapshot_id']).replace('-', '')
+ last_snap_os = self.db.snapshot_get(context, last_snap_id_os)
+ vol_first_id = last_snap_os['volume_id']
+ volume_file['source_volume_id'] = vol_first_id
+
+ ext_params = vol_second_fc.get('drExtParams')
+ LOG.info(_("[VRM-CINDER] ext_params [%s]"), ext_params)
+ if ext_params:
+ ext_params_dic = json.loads(ext_params)
+
+ storage_type = vol_second_fc.get('storageType')
+ sd_sn = vol_second_fc.get('storageSN')
+ if storage_type == 'advanceSan':
+ volume_file['storage_type'] = 4 # advanceSan v3 volume
+ support_pvscsi = vol_second_fc.get('pvscsiSupport')
+ if support_pvscsi is not None and support_pvscsi == 1:
+ volume_file['storage_type'] = 5 # RDM : direct io volume
+ else:
+ volume_file['storage_type'] = 0
+ LOG.info(_("[VRM-CINDER] ext_params [%s]"), ext_params_dic)
+ volume_file['storage_ip'] = ext_params_dic['dsMgntIp']
+ volume_file['storage_pool_id'] = ext_params_dic['dsResourceId']
+
+ LOG.info(_("[VRM-CINDER] volume_file [%s]"), volume_file)
+ backup_list = self.db.backup_get_by_volume_id(context, vol_first_id)
+
+ last_backup = None
+ if backup_list is not None:
+ for back_tmp in backup_list:
+ if back_tmp['status'] != "available" and back_tmp[
+ 'status'] != "restoring":
+ continue
+ if last_backup is None:
+ last_backup = back_tmp
+ else:
+ if last_backup['created_at'] < back_tmp['created_at']:
+ last_backup = back_tmp
+
+ if last_backup is None:
+ volume_file['backup_type'] = 0
+ volume_file['parent_id'] = None
+ volume_file['parent_snapshot_url'] = None
+ else:
+ LOG.info(_("last_backup %s") % last_backup['id'])
+ volume_file['backup_type'] = 1
+ volume_file['parent_id'] = last_backup['id']
+
+ if last_backup['service_metadata'] is None:
+ raise exception.InvalidVolumeMetadata(
+ reason="backup service_metadata is none")
+
+ service_meta = last_backup['service_metadata']
+ str_service_meta = json.loads(service_meta)
+ parent_snapshot_id_os = str_service_meta.get('snap_id')
+ if parent_snapshot_id_os is not None:
+ parent_snapshot_id_fc = \
+ str(parent_snapshot_id_os).replace('-', '')
+
+ if storage_type == 'advanceSan':
+ parent_snapshot_fc = \
+ self.volume_snapshot_proxy.query_volumesnapshot(
+ uuid=parent_snapshot_id_fc)
+ if parent_snapshot_fc and parent_snapshot_fc.\
+ get('snapshotLunId') is not None:
+ volume_file[
+ 'parent_snapshot_url'] = \
+ 'http://' + sd_sn + '/' + parent_snapshot_fc.get(
+ 'snapshotLunId')
+ else:
+ volume_file['parent_snapshot_url'] = 'http://' + \
+ ext_params_dic[
+ 'dsMgntIp'] + \
+ '/' + \
+ ext_params_dic[
+ 'dsResourceId'] +\
+ '/' + \
+ parent_snapshot_id_fc
+ else:
+ raise exception.InvalidVolumeMetadata(
+ reason="snapshot_id is none")
+
+ LOG.info(_("vol_first_id is %s") % vol_first_id)
+ vol_first_os = self.db.volume_get(context, vol_first_id)
+ vol_first_meta = vol_first_os.get('volume_metadata')
+ vol_first_meta_dict = metadata_to_dict(vol_first_meta)
+ volume_first_urn = vol_first_meta_dict.get('urn')
+ fc_first_vol_id = volume_first_urn[volume_urn.rfind(':') + 1:]
+ LOG.info(_("fc_first_vol_id is %s") % fc_first_vol_id)
+
+ vol_source_fc = self.volume_proxy.query_volume(id=fc_first_vol_id)
+ LOG.info(_("vol_source_fc linkCloneParent is %s") % vol_source_fc[
+ 'linkCloneParent'])
+ if vol_source_fc['linkCloneParent'] is not None:
+ volume_file['is_clone_volume'] = True
+ try:
+ vol_linked_fc = self.volume_proxy.query_volume(
+ id=vol_source_fc['linkCloneParent'])
+ if storage_type == 'advanceSan':
+ volume_file['clone_volume_url'] = 'http://' + \
+ vol_linked_fc[
+ 'storageSN'] \
+ + '/' + vol_linked_fc[
+ 'lunId']
+ else:
+ linked_ext_params_dic = json.loads(
+ vol_linked_fc.get('drExtParams'))
+ volume_file['clone_volume_url'] = 'http://' + \
+ linked_ext_params_dic[
+ 'dsMgntIp'] \
+ + '/' + \
+ linked_ext_params_dic[
+ 'dsResourceId'] \
+ + '/' + vol_source_fc[
+ 'linkCloneParent']
+ except Exception:
+ LOG.error(_("clone colume not exit"))
+ volume_file['clone_volume_url'] = None
+ else:
+ volume_file['is_clone_volume'] = False
+ volume_file['clone_volume_url'] = None
+
+ volume_file['snapshot_name'] = last_snap_os['name']
+ volume_file['snapshot_id'] = last_snap_id_fc
+
+ LOG.info(_("[VRM-CINDER] volume_file [%s]"), volume_file)
+
+ volume_file['volume_size'] = vol_second_os['size']
+ LOG.info(_("[VRM-CINDER] volume_file [%s]"), volume_file)
+ if storage_type == 'advanceSan':
+ last_snapshot = self.volume_snapshot_proxy.query_volumesnapshot(
+ uuid=last_snap_id_fc)
+ if last_snapshot and last_snapshot.get(
+ 'snapshotLunId') is not None:
+ volume_file['snapshot_url'] = 'http://' + sd_sn \
+ + '/' + last_snapshot.get(
+ 'snapshotLunId')
+ else:
+ volume_file['snapshot_url'] = \
+ 'http://' + ext_params_dic['dsMgntIp'] + '/' + \
+ ext_params_dic['dsResourceId'] + '/' + last_snap_id_fc
+
+ volume_file['bootable'] = False
+ volume_file['image_id'] = None
+ try:
+ vol_image_meta = \
+ self.volume_api.get_volume_image_metadata(context,
+ vol_second_os)
+ if vol_image_meta:
+ vol_image_meta_dic = dict(vol_image_meta)
+ volume_file['image_id'] = vol_image_meta_dic.get('image_id')
+ volume_file['bootable'] = True
+
+ except exception.CinderException as ex:
+ LOG.error(
+ _('[BRM-DRIVER] get_volume_image_metadata is error [%s]'), ex)
+
+ if vol_first_os.get('bootable') is True:
+ volume_file['bootable'] = True
+
+ LOG.info(_("[VRM-CINDER] volume_file [%s]"), volume_file)
+
+ try:
+ backup_service.backup(backup, volume_file)
+ finally:
+ LOG.info(('cleanup for volume %s.') % vol_second_os['name'])
+
+ def restore_backup(self, context, backup, volume, backup_service):
+ """Restore an existing backup to a new or existing volume."""
+
+ LOG.info(('restore_backup for volume %s.') % volume['name'])
+ volume_file = {}
+
+ vol_meta = volume.get('volume_metadata')
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ volume_urn = vol_meta_dict.get('urn')
+ fc_vol_id = volume_urn[volume_urn.rfind(':') + 1:]
+ vol_second_fc = self.volume_proxy.query_volume(id=fc_vol_id)
+ ext_params = vol_second_fc.get('drExtParams')
+ if ext_params:
+ LOG.info(_("[VRM-CINDER] ext_params [%s]"), ext_params)
+ ext_params_dic = json.loads(vol_second_fc.get('drExtParams'))
+ LOG.info(_("[VRM-CINDER] ext_params [%s]"), ext_params_dic)
+ storage_type = vol_second_fc.get('storageType')
+ volume_file['restore_type'] = 0
+ if backup['volume_id'] == volume['id']:
+ volume_file['restore_type'] = 1
+ service_meta = backup['service_metadata']
+ str_service_meta = json.loads(service_meta)
+ last_snapshot_id_openstack = str_service_meta.get('snap_id')
+ last_snapshot_id_fc = str(last_snapshot_id_openstack).replace('-',
+ '')
+ last_snapshot_fc = self.volume_snapshot_proxy.query_volumesnapshot(
+ uuid=last_snapshot_id_fc)
+ if last_snapshot_fc: # only the last backup has snapshot
+ if storage_type == 'advanceSan':
+ volume_file['lastest_snapshot_url'] = 'http://' + \
+ vol_second_fc[
+ 'storageSN'] +\
+ '/' + \
+ last_snapshot_fc[
+ 'snapshotLunId']
+ else:
+ volume_file['lastest_snapshot_url'] = 'http://' + \
+ ext_params_dic[
+ 'dsMgntIp'] \
+ + '/' + \
+ ext_params_dic[
+ 'dsResourceId'] \
+ + '/' + \
+ last_snapshot_id_fc
+
+ if storage_type == 'advanceSan':
+ volume_file['storage_type'] = 4
+ pvscsi_support = vol_second_fc.get('pvscsiSupport')
+ if pvscsi_support is not None and pvscsi_support == 1:
+ volume_file['storage_type'] = 5 # RDM: direct io volume
+ volume_file['volume_url'] = 'http://' + vol_second_fc[
+ 'storageSN'] + '/' + vol_second_fc['lunId']
+ else:
+ volume_file['storage_type'] = 0
+ volume_file['volume_url'] = vol_second_fc['volInfoUrl']
+ LOG.info(_("[VRM-CINDER] volume_file [%s]"), volume_file)
+ volume_file['storage_ip'] = ext_params_dic['dsMgntIp']
+ volume_file['storage_pool_id'] = ext_params_dic['dsResourceId']
+ volume_file['volume_offset'] = True
+ volume_file['volume_name'] = vol_second_fc['volNameOnDev']
+
+ if vol_second_fc['linkCloneParent'] is not None:
+ try:
+ vol_linked_fc = self.volume_proxy.query_volume(
+ id=vol_second_fc['linkCloneParent'])
+ if vol_second_fc['storageType'] == 'advanceSan':
+ volume_file['clone_volume_url'] = 'http://' + \
+ vol_linked_fc[
+ 'storageSN'] + \
+ '/' + vol_linked_fc['lunId']
+ else:
+ linked_ext_params_dic = json.loads(
+ vol_linked_fc.get('drExtParams'))
+ volume_file['clone_volume_url'] = 'http://' + \
+ linked_ext_params_dic[
+ 'dsMgntIp'] \
+ + '/' + \
+ linked_ext_params_dic[
+ 'dsResourceId'] \
+ + '/' + vol_second_fc[
+ 'linkCloneParent']
+ except Exception:
+ LOG.error(_("clone volume does not exist"))
+ volume_file['clone_volume_url'] = None
+ else:
+ volume_file['clone_volume_url'] = None
+
+ try:
+ backup_service.restore(backup, volume['id'], volume_file)
+ finally:
+ LOG.info(('cleanup for volume %s.') % volume['name'])
+
+ def delete_cgsnapshot(self, context, cgsnapshot):
+ """Delete a cgsnapshot."""
+ model_update = {}
+ model_update['status'] = cgsnapshot['status']
+ cgsnapshot_id = cgsnapshot['id']
+ LOG.info(_('[BRM-DRIVER] start to delete cgsnapshot [%s]'),
+ cgsnapshot_id)
+
+ snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+ cgsnapshot_id)
+ if snapshots and len(snapshots) > 0:
+ for snapshot in snapshots:
+ snapshot_id = snapshot['id']
+ snapshot_uuid = str(snapshot_id).replace('-', '')
+ body = self.volume_snapshot_proxy.query_volumesnapshot(
+ uuid=snapshot_uuid)
+ if body is None:
+ snapshot['status'] = 'deleted'
+ continue
+ try:
+ self.volume_snapshot_proxy.delete_volumesnapshot(
+ id=snapshot_uuid)
+ snapshot['status'] = 'deleted'
+ except Exception as ex:
+ LOG.exception(ex)
+ snapshot['status'] = 'error_deleting'
+ model_update['status'] = 'error_deleting'
+ return model_update, snapshots
+
+ def create_cgsnapshot(self, context, cgsnapshot):
+ model_update = {}
+ consistencygroup_id = cgsnapshot['consistencygroup_id']
+ cgsnapshot_id = cgsnapshot['id']
+ snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
+ cgsnapshot_id)
+ volumes = self.db.volume_get_all_by_group(context, consistencygroup_id)
+ snapshot_uuids = []
+ if volumes is None or len(volumes) <= 0:
+ return model_update
+ else:
+ for volume_ref in volumes:
+ for snapshot in snapshots:
+ if snapshot['volume_id'] == volume_ref['id']:
+ vol_meta = volume_ref['volume_metadata']
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ vol_urn = vol_meta_dict.get('urn')
+ if vol_urn is None:
+ LOG.warn(_(
+ "vol_urn is null while creating cgsnapshot."))
+ snapshot_id = snapshot['id']
+ snapshot_uuid = str(snapshot_id).replace('-', '')
+ try:
+ body = self.volume_snapshot_proxy.\
+ create_volumesnapshot(
+ snapshot_uuid=snapshot_uuid,
+ vol_urn=vol_urn, enable_active=False)
+ except Exception as ex:
+ LOG.exception(ex)
+ snapshot['status'] = 'error'
+ model_update['status'] = 'error'
+ continue
+ if body['urn'] is None:
+ snapshot['status'] = 'error'
+ model_update['status'] = 'error'
+ snapshot_uuids.append(snapshot_uuid)
+ try:
+ self.volume_snapshot_proxy.active_snapshots(
+ snapshot_uuids=snapshot_uuids)
+ except Exception as ex:
+ LOG.exception(ex)
+ for snapshot in snapshots:
+ snapshot['status'] = 'error'
+ model_update['status'] = 'error'
+
+ return model_update, snapshots
+
+ def _get_cluster_by_dsurn(self, ds_urn):
+ """Return clusterUrn by dsurn"""
+ # datastore_id = ds_urn[ds_urn.rfind(':') + 1:]
+ clusters = self.cluster_proxy.list_cluster()
+ random.shuffle(clusters)
+ for cluster in clusters:
+ cluster_urn = cluster.get('urn')
+ args_dict = {}
+ args_dict['scope'] = cluster_urn
+ try:
+ datastores = self.datastore_proxy.list_datastore(**args_dict)
+ for datastore in datastores:
+ if datastore['urn'] == ds_urn:
+ hosts = self.cluster_proxy.list_hosts(
+ clusterUrn=cluster_urn)
+ for host in hosts:
+ if host["status"] == "normal" and host[
+ "isMaintaining"] is False:
+ return cluster_urn
+ except Exception as ex:
+ LOG.error(
+ _('[BRM-DRIVER] get_volume_image_metadata is error [%s]'),
+ ex)
+ msg = _('[BRM-DRIVER] get cluster is none')
+ LOG.error(msg)
+ raise exception.VolumeDriverException(message=msg)
diff --git a/cinder/volume/drivers/huawei/fusioncompute/vrm_driver_huawei.py b/cinder/volume/drivers/huawei/fusioncompute/vrm_driver_huawei.py
new file mode 100644
index 0000000..1076df2
--- /dev/null
+++ b/cinder/volume/drivers/huawei/fusioncompute/vrm_driver_huawei.py
@@ -0,0 +1,200 @@
+# Copyright 2016 Huawei Technologies Co.,LTD.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from cinder import context as cinder_context
+from cinder.i18n import _
+from cinder.volume.drivers.huawei.vrm.vrm_driver import VRMDriver
+
+
+def metadata_to_dict(metadata):
+ result = {}
+ for item in metadata:
+ if not item.get('deleted'):
+ result[item['key']] = item['value']
+ return result
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class VRMDriverHuawei(VRMDriver):
+ VENDOR = 'Huawei'
+ BACKEND = 'VRM'
+ VERSION = 'v1.1'
+
+ def __init__(self, *args, **kwargs):
+ '''__init__
+
+ :param args:
+ :param kwargs:
+ :return:
+ '''
+ super(VRMDriverHuawei, self).__init__(*args, **kwargs)
+
+ def create_volume(self, volume):
+ '''create_volume
+
+ create_volume
+ {
+ "name":string,
+ quantityGB:integer,
+ datastoreUrn:string,
+ "isThin":boolean,
+ "type":string,
+ indepDisk:boolean,
+ persistentDisk:boolean
+ }
+
+ :param volume:
+ :return:
+ '''
+ LOG.info(_("[CINDER-VRM] start create_volume() "))
+
+ vol_meta = volume.get('volume_metadata')
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ context = cinder_context.get_admin_context()
+
+ temp_str = vol_meta_dict.get('urn')
+ fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+
+ name = volume.get('name')
+ model_update, metadata = self._vrm_get_volume_meta(id=fc_vol_id)
+ self.db.volume_metadata_update(context, volume['id'], metadata, False)
+ try:
+ self.volume_proxy.update_custom_properties(volume_urn=temp_str,
+ external_uuid=volume[
+ 'id'])
+ # modify volume name
+ self.volume_proxy.modify_volume(volume_id=fc_vol_id, name=name)
+ except Exception as ex:
+ LOG.error(
+ _("[CINDER-VRM] modify volume information failed after create "
+ "volume.volume id is [%s] ") % fc_vol_id)
+ LOG.exception(ex)
+ return model_update
+
+ def delete_volume(self, volume):
+ '''delete_volume
+
+ :param volume:
+ :return:
+ '''
+ LOG.info(_("[CINDER-VRM] start delete_volume() "))
+ vol_meta = volume.get('volume_metadata')
+ vol_meta_dict = metadata_to_dict(vol_meta)
+ fc_volume_name = vol_meta_dict.get('fc_volume_name')
+ temp_str = vol_meta_dict.get('urn')
+ fc_vol_id = temp_str[temp_str.rfind(':') + 1:]
+ try:
+ self.volume_proxy.update_custom_properties(volume_urn=temp_str,
+ external_uuid="")
+ # modify volume name
+ self.volume_proxy.modify_volume(volume_id=fc_vol_id,
+ name=fc_volume_name)
+ except Exception as ex:
+ LOG.error(
+ _("[CINDER-VRM] modify volume information failed after create"
+ " volume.volume id is [%s] ") % fc_vol_id)
+ LOG.exception(ex)
+ return
+
+ def _try_get_volume_stats(self, refresh=False):
+ '''_try_get_volume_stats
+
+ :param refresh:If 'refresh' is True, run the update first.
+ :return:Return the current state of the volume service.
+ '''
+# LOG.info(_("[BRM-DRIVER] start _try_get_volume_stats() "))
+ if refresh:
+ self.left_periodrate -= 1
+ if self.left_periodrate <= 0:
+ self._refresh_storage_info(refresh)
+ stats = self._build_volume_stats()
+ ds_meta = {}
+ ds_names = [ds['name'] for ds in self.SHARED_DATASTORES]
+ for pool in self.pool_list:
+ if pool not in ds_names:
+ continue
+ new_pool = {}
+ ds_meta['ds_name'] = pool
+ datastore = self._choose_datastore(ds_meta)
+ if datastore.get('storageType') == 'advanceSan' and datastore.get(
+ 'version') is not None:
+ new_pool.update(dict(consistencygroup_support=True))
+ if 'NORMAL' != datastore['status']:
+ new_pool.update(dict(
+ pool_name=pool,
+ free_capacity_gb=0,
+ reserved_percentage=self.reserved_percentage,
+ total_capacity_gb=0,
+ provisioned_capacity_gb=0,
+ max_over_subscription_ratio=self.over_ratio,
+ affine_rate=1
+ ))
+ stats["pools"].append(new_pool)
+ continue
+
+ if self.current_list is not None and pool in self.current_list:
+ new_pool.update(dict(
+ pool_name=pool,
+ free_capacity_gb="infinite",
+ reserved_percentage=self.reserved_percentage,
+ total_capacity_gb="infinite",
+ provisioned_capacity_gb=datastore['usedSizeGB'],
+ max_over_subscription_ratio=self.over_ratio,
+ affine_rate=self.affine_rate
+ ))
+ else:
+ new_pool.update(dict(
+ pool_name=pool,
+ free_capacity_gb="infinite",
+ reserved_percentage=self.reserved_percentage,
+ total_capacity_gb="infinite",
+ provisioned_capacity_gb=datastore['usedSizeGB'],
+ max_over_subscription_ratio=self.over_ratio,
+ affine_rate=1
+ ))
+ if self.thin_provision is True:
+ new_pool.update(dict(
+ thin_provisioning_support=True,
+ thick_provisioning_support=False
+ ))
+ else:
+ new_pool.update(dict(
+ thin_provisioning_support=False,
+ thick_provisioning_support=True
+ ))
+ tier_size = datastore.get('tierSize', None)
+ type_v3 = []
+ if tier_size and len(tier_size) >= 3:
+ if tier_size[0] > 0:
+ type_v3.append('ssd')
+ if tier_size[1] > 0:
+ type_v3.append('sas')
+ if tier_size[2] > 0:
+ type_v3.append('nl_sas')
+ if len(type_v3) > 0:
+ type_v3_str = ';'.join(type_v3)
+ LOG.info(_("[CINDER-BRM] type of v3 is %s"), type_v3_str)
+ new_pool.update(dict(type=type_v3_str))
+ stats["pools"].append(new_pool)
+
+ # LOG.info(_("[CINDER-BRM] (%d)--%s"), self.left_periodrate, stats)
+
+ return stats
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..3ed7474
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../..'))
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ #'sphinx.ext.intersphinx',
+ 'oslosphinx'
+]
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'cinder-fusioncompute'
+copyright = u'2016, OpenStack Foundation'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+# html_static_path = ['static']
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+ ('index',
+ '%s.tex' % project,
+ u'%s Documentation' % project,
+ u'OpenStack Foundation', 'manual'),
+]
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
new file mode 100644
index 0000000..1728a61
--- /dev/null
+++ b/doc/source/contributing.rst
@@ -0,0 +1,4 @@
+============
+Contributing
+============
+.. include:: ../../CONTRIBUTING.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..ed7bf01
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,25 @@
+.. cinder-fusioncompute documentation master file, created by
+ sphinx-quickstart on Tue Jul 9 22:26:36 2013.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to cinder-fusioncompute's documentation!
+========================================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ readme
+ installation
+ usage
+ contributing
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
new file mode 100644
index 0000000..e67097f
--- /dev/null
+++ b/doc/source/installation.rst
@@ -0,0 +1,12 @@
+============
+Installation
+============
+
+At the command line::
+
+ $ pip install cinder-fusioncompute
+
+Or, if you have virtualenvwrapper installed::
+
+ $ mkvirtualenv cinder-fusioncompute
+ $ pip install cinder-fusioncompute
diff --git a/doc/source/readme.rst b/doc/source/readme.rst
new file mode 100644
index 0000000..a6210d3
--- /dev/null
+++ b/doc/source/readme.rst
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/doc/source/usage.rst b/doc/source/usage.rst
new file mode 100644
index 0000000..25792e6
--- /dev/null
+++ b/doc/source/usage.rst
@@ -0,0 +1,7 @@
+========
+Usage
+========
+
+To use cinder-fusioncompute in a project::
+
+ import cinder_fusioncompute
diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
new file mode 100644
index 0000000..07d2bea
--- /dev/null
+++ b/releasenotes/source/conf.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Glance Release Notes documentation build configuration file, created by
+# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'oslosphinx',
+ 'reno.sphinxext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'cinder-fusioncompute Release Notes'
+copyright = u'2016, OpenStack Foundation'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+# The full version, including alpha/beta/rc tags.
+release = ''
+# The short X.Y version.
+version = ''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'GlanceReleaseNotesdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
+ u'Glance Developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
+ [u'Glance Developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
+ u'Glance Developers', 'GlanceReleaseNotes',
+ 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+# -- Options for Internationalization output ------------------------------
+locale_dirs = ['locale/']
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
new file mode 100644
index 0000000..b8445c5
--- /dev/null
+++ b/releasenotes/source/index.rst
@@ -0,0 +1,8 @@
+============================================
+ cinder-fusioncompute Release Notes
+============================================
+
+.. toctree::
+ :maxdepth: 1
+
+ unreleased
diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst
new file mode 100644
index 0000000..cd22aab
--- /dev/null
+++ b/releasenotes/source/unreleased.rst
@@ -0,0 +1,5 @@
+==============================
+ Current Series Release Notes
+==============================
+
+.. release-notes::
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..621be2f
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pbr>=1.8 # Apache-2.0
+oslo.config!=3.18.0,>=3.14.0 # Apache-2.0
+oslo.cache>=1.5.0 # Apache-2.0
+oslo.concurrency>=3.8.0 # Apache-2.0
+oslo.context>=2.9.0 # Apache-2.0
+oslo.log>=3.11.0 # Apache-2.0
+oslo.reports>=0.6.0 # Apache-2.0
+oslo.serialization>=1.10.0 # Apache-2.0
+oslo.db!=4.13.1,!=4.13.2,>=4.11.0 # Apache-2.0
+oslo.rootwrap>=5.0.0 # Apache-2.0
+oslo.messaging>=5.2.0 # Apache-2.0
+oslo.privsep>=1.9.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
+oslo.service>=1.10.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..caa3d5a
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,51 @@
+[metadata]
+name = cinder-fusioncompute
+summary = Implementation of Cinder driver for Huawei Fusioncompute.
+description-file =
+ README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+ Environment :: OpenStack
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.3
+ Programming Language :: Python :: 3.4
+
+[files]
+packages =
+ cinder-fusioncompute
+
+[build_sphinx]
+source-dir = doc/source
+build-dir = doc/build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[compile_catalog]
+directory = cinder-fusioncompute/locale
+domain = cinder-fusioncompute
+
+[update_catalog]
+domain = cinder-fusioncompute
+output_dir = cinder-fusioncompute/locale
+input_file = cinder-fusioncompute/locale/cinder-fusioncompute.pot
+
+[extract_messages]
+keywords = _ gettext ngettext l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = cinder-fusioncompute/locale/cinder-fusioncompute.pot
+
+[build_releasenotes]
+all_files = 1
+build-dir = releasenotes/build
+source-dir = releasenotes/source
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..056c16c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr'],
+ pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..027aaa8
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,17 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+hacking<0.13,>=0.12.0 # Apache-2.0
+
+coverage>=4.0 # Apache-2.0
+python-subunit>=0.0.18 # Apache-2.0/BSD
+sphinx>=1.2.1,!=1.3b1,<1.4 # BSD
+oslosphinx>=4.7.0 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+
+# releasenotes
+reno>=1.8.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..ec4cd78
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,40 @@
+[tox]
+minversion = 2.0
+envlist = py34,py27,pypy,pep8
+skipsdist = True
+
+[testenv]
+usedevelop = True
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+setenv =
+ VIRTUAL_ENV={envdir}
+ PYTHONWARNINGS=default::DeprecationWarning
+deps = -r{toxinidir}/test-requirements.txt
+commands = python setup.py test --slowest --testr-args='{posargs}'
+
+[testenv:pep8]
+commands = flake8 {posargs}
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:cover]
+commands = python setup.py test --coverage --testr-args='{posargs}'
+
+[testenv:docs]
+commands = python setup.py build_sphinx
+
+[testenv:releasenotes]
+commands =
+ sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+
+[testenv:debug]
+commands = oslo_debug_helper {posargs}
+
+[flake8]
+# E123, E125 skipped as they are invalid PEP-8.
+
+show-source = True
+ignore = E123,E125,N342,H104,W391
+builtins = _
+exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build