From 754725a4aae12702ccccf21393af34af7454721b Mon Sep 17 00:00:00 2001 From: Alexandre Levine Date: Sat, 1 Feb 2014 16:59:00 +0400 Subject: [PATCH] GCE-API support service for OpenStack Change-Id: I80f6b024cb40ba31ebaacf35d7364f66115da9c6 Implements: blueprint gce-api --- .testr.conf | 4 + HACKING.rst | 43 + LICENSE | 176 + MANIFEST.in | 20 + README.rst | 228 + babel.cfg | 1 + bin/gceapi-db-setup | 291 + etc/gceapi/api-paste.ini | 48 + gceapi/__init__.py | 31 + gceapi/api/__init__.py | 253 + gceapi/api/address_api.py | 26 + gceapi/api/address_neutron_api.py | 120 + gceapi/api/address_nova_api.py | 124 + gceapi/api/addresses.py | 49 + gceapi/api/base_api.py | 213 + gceapi/api/clients.py | 141 + gceapi/api/common.py | 389 + gceapi/api/compute/v1.json | 6734 +++++++++++++++++ gceapi/api/discovery.py | 63 + gceapi/api/disk_api.py | 161 + gceapi/api/disks.py | 69 + gceapi/api/firewall_api.py | 256 + gceapi/api/firewalls.py | 45 + gceapi/api/image_api.py | 142 + gceapi/api/images.py | 44 + gceapi/api/instance_address_api.py | 162 + gceapi/api/instance_api.py | 377 + gceapi/api/instance_disk_api.py | 146 + gceapi/api/instances.py | 157 + gceapi/api/machine_type_api.py | 68 + gceapi/api/machine_types.py | 49 + gceapi/api/network_api.py | 26 + gceapi/api/network_neutron_api.py | 143 + gceapi/api/network_nova_api.py | 94 + gceapi/api/networks.py | 40 + gceapi/api/oauth.py | 239 + gceapi/api/operation_api.py | 168 + gceapi/api/operation_util.py | 73 + gceapi/api/operations.py | 31 + gceapi/api/project_api.py | 103 + gceapi/api/projects.py | 104 + gceapi/api/region_api.py | 43 + gceapi/api/regions.py | 41 + gceapi/api/route_api.py | 26 + gceapi/api/route_neutron_api.py | 409 + gceapi/api/route_nova_api.py | 41 + gceapi/api/routes.py | 52 + gceapi/api/scopes.py | 120 + gceapi/api/snapshot_api.py | 113 + gceapi/api/snapshots.py | 51 + gceapi/api/utils.py | 160 + gceapi/api/wsgi.py | 223 + gceapi/api/zone_api.py | 68 + gceapi/api/zones.py | 38 + gceapi/auth.py | 142 + gceapi/cmd/__init__.py | 19 + gceapi/cmd/api.py | 51 + gceapi/cmd/manage.py | 75 + gceapi/config.py | 35 + gceapi/context.py | 162 + gceapi/db/__init__.py | 19 + gceapi/db/api.py | 58 + gceapi/db/migration.py | 73 + gceapi/db/sqlalchemy/__init__.py | 13 + gceapi/db/sqlalchemy/api.py | 133 + gceapi/db/sqlalchemy/migrate_repo/README | 4 + gceapi/db/sqlalchemy/migrate_repo/__init__.py | 13 + gceapi/db/sqlalchemy/migrate_repo/manage.py | 19 + gceapi/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_icehouse.py | 38 + .../migrate_repo/versions/__init__.py | 13 + gceapi/db/sqlalchemy/migration.py | 30 + gceapi/db/sqlalchemy/models.py | 37 + gceapi/exception.py | 1423 ++++ gceapi/openstack/__init__.py | 0 gceapi/openstack/common/__init__.py | 0 gceapi/openstack/common/db/__init__.py | 0 gceapi/openstack/common/db/api.py | 57 + gceapi/openstack/common/db/exception.py | 54 + .../common/db/sqlalchemy/__init__.py | 0 .../common/db/sqlalchemy/migration.py | 265 + .../openstack/common/db/sqlalchemy/models.py | 117 + .../common/db/sqlalchemy/provision.py | 187 + .../openstack/common/db/sqlalchemy/session.py | 867 +++ .../common/db/sqlalchemy/test_migrations.py | 269 + .../openstack/common/db/sqlalchemy/utils.py | 548 ++ gceapi/openstack/common/eventlet_backdoor.py | 144 + gceapi/openstack/common/excutils.py | 99 + gceapi/openstack/common/gettextutils.py | 440 ++ gceapi/openstack/common/importutils.py | 66 + gceapi/openstack/common/jsonutils.py | 182 + gceapi/openstack/common/local.py | 45 + gceapi/openstack/common/log.py | 657 
++ .../openstack/common/py3kcompat/__init__.py | 0 .../openstack/common/py3kcompat/urlutils.py | 67 + gceapi/openstack/common/test.py | 88 + gceapi/openstack/common/timeutils.py | 210 + gceapi/paths.py | 68 + gceapi/service.py | 263 + gceapi/test.py | 180 + gceapi/tests/__init__.py | 30 + gceapi/tests/api/__init__.py | 16 + gceapi/tests/api/common.py | 136 + gceapi/tests/api/fake_cinder_client.py | 307 + gceapi/tests/api/fake_db.py | 316 + gceapi/tests/api/fake_glance_client.py | 137 + gceapi/tests/api/fake_keystone_client.py | 38 + gceapi/tests/api/fake_neutron_client.py | 261 + gceapi/tests/api/fake_nova_client.py | 796 ++ gceapi/tests/api/fake_request.py | 84 + gceapi/tests/api/test_addresses.py | 147 + gceapi/tests/api/test_disks.py | 256 + gceapi/tests/api/test_fields.py | 65 + gceapi/tests/api/test_firewalls.py | 291 + gceapi/tests/api/test_images.py | 167 + gceapi/tests/api/test_instances.py | 310 + gceapi/tests/api/test_machine_types.py | 148 + gceapi/tests/api/test_networks.py | 117 + gceapi/tests/api/test_operations.py | 323 + gceapi/tests/api/test_projects.py | 92 + gceapi/tests/api/test_regions.py | 77 + gceapi/tests/api/test_routes.py | 255 + gceapi/tests/api/test_snapshots.py | 117 + gceapi/tests/api/test_zones.py | 72 + gceapi/tests/api/utils.py | 26 + gceapi/version.py | 91 + gceapi/wsgi.py | 485 ++ gceapi/wsgi_ext.py | 798 ++ openstack-common.conf | 7 + requirements.txt | 26 + run_tests.sh | 123 + setup.cfg | 69 + setup.py | 22 + test-requirements.txt | 14 + tools/db/schema_diff.py | 270 + tools/enable-pre-commit-hook.sh | 42 + tools/install_venv.py | 74 + tools/install_venv_common.py | 213 + tools/lintstack.py | 199 + tools/lintstack.sh | 59 + tools/patch_tox_venv.py | 50 + tools/regression_tester.py | 109 + tools/with_venv.sh | 7 + tox.ini | 56 + 144 files changed, 27584 insertions(+) create mode 100644 .testr.conf create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/gceapi-db-setup create mode 100644 etc/gceapi/api-paste.ini create mode 100644 gceapi/__init__.py create mode 100644 gceapi/api/__init__.py create mode 100644 gceapi/api/address_api.py create mode 100644 gceapi/api/address_neutron_api.py create mode 100644 gceapi/api/address_nova_api.py create mode 100644 gceapi/api/addresses.py create mode 100644 gceapi/api/base_api.py create mode 100644 gceapi/api/clients.py create mode 100644 gceapi/api/common.py create mode 100644 gceapi/api/compute/v1.json create mode 100644 gceapi/api/discovery.py create mode 100644 gceapi/api/disk_api.py create mode 100644 gceapi/api/disks.py create mode 100644 gceapi/api/firewall_api.py create mode 100644 gceapi/api/firewalls.py create mode 100644 gceapi/api/image_api.py create mode 100644 gceapi/api/images.py create mode 100644 gceapi/api/instance_address_api.py create mode 100644 gceapi/api/instance_api.py create mode 100644 gceapi/api/instance_disk_api.py create mode 100644 gceapi/api/instances.py create mode 100644 gceapi/api/machine_type_api.py create mode 100644 gceapi/api/machine_types.py create mode 100644 gceapi/api/network_api.py create mode 100644 gceapi/api/network_neutron_api.py create mode 100644 gceapi/api/network_nova_api.py create mode 100644 gceapi/api/networks.py create mode 100644 gceapi/api/oauth.py create mode 100644 gceapi/api/operation_api.py create mode 100644 gceapi/api/operation_util.py create mode 100644 gceapi/api/operations.py create mode 100644 gceapi/api/project_api.py create mode 
100644 gceapi/api/projects.py create mode 100644 gceapi/api/region_api.py create mode 100644 gceapi/api/regions.py create mode 100644 gceapi/api/route_api.py create mode 100644 gceapi/api/route_neutron_api.py create mode 100644 gceapi/api/route_nova_api.py create mode 100644 gceapi/api/routes.py create mode 100644 gceapi/api/scopes.py create mode 100644 gceapi/api/snapshot_api.py create mode 100644 gceapi/api/snapshots.py create mode 100644 gceapi/api/utils.py create mode 100644 gceapi/api/wsgi.py create mode 100644 gceapi/api/zone_api.py create mode 100644 gceapi/api/zones.py create mode 100644 gceapi/auth.py create mode 100644 gceapi/cmd/__init__.py create mode 100755 gceapi/cmd/api.py create mode 100644 gceapi/cmd/manage.py create mode 100644 gceapi/config.py create mode 100644 gceapi/context.py create mode 100644 gceapi/db/__init__.py create mode 100644 gceapi/db/api.py create mode 100644 gceapi/db/migration.py create mode 100644 gceapi/db/sqlalchemy/__init__.py create mode 100644 gceapi/db/sqlalchemy/api.py create mode 100644 gceapi/db/sqlalchemy/migrate_repo/README create mode 100644 gceapi/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 gceapi/db/sqlalchemy/migrate_repo/manage.py create mode 100644 gceapi/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 gceapi/db/sqlalchemy/migrate_repo/versions/001_icehouse.py create mode 100644 gceapi/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 gceapi/db/sqlalchemy/migration.py create mode 100644 gceapi/db/sqlalchemy/models.py create mode 100644 gceapi/exception.py create mode 100644 gceapi/openstack/__init__.py create mode 100644 gceapi/openstack/common/__init__.py create mode 100644 gceapi/openstack/common/db/__init__.py create mode 100644 gceapi/openstack/common/db/api.py create mode 100644 gceapi/openstack/common/db/exception.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/__init__.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/migration.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/models.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/provision.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/session.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/test_migrations.py create mode 100644 gceapi/openstack/common/db/sqlalchemy/utils.py create mode 100644 gceapi/openstack/common/eventlet_backdoor.py create mode 100644 gceapi/openstack/common/excutils.py create mode 100644 gceapi/openstack/common/gettextutils.py create mode 100644 gceapi/openstack/common/importutils.py create mode 100644 gceapi/openstack/common/jsonutils.py create mode 100644 gceapi/openstack/common/local.py create mode 100644 gceapi/openstack/common/log.py create mode 100644 gceapi/openstack/common/py3kcompat/__init__.py create mode 100644 gceapi/openstack/common/py3kcompat/urlutils.py create mode 100644 gceapi/openstack/common/test.py create mode 100644 gceapi/openstack/common/timeutils.py create mode 100644 gceapi/paths.py create mode 100644 gceapi/service.py create mode 100644 gceapi/test.py create mode 100644 gceapi/tests/__init__.py create mode 100644 gceapi/tests/api/__init__.py create mode 100644 gceapi/tests/api/common.py create mode 100644 gceapi/tests/api/fake_cinder_client.py create mode 100644 gceapi/tests/api/fake_db.py create mode 100644 gceapi/tests/api/fake_glance_client.py create mode 100644 gceapi/tests/api/fake_keystone_client.py create mode 100644 gceapi/tests/api/fake_neutron_client.py create mode 100644 gceapi/tests/api/fake_nova_client.py 
create mode 100644 gceapi/tests/api/fake_request.py create mode 100644 gceapi/tests/api/test_addresses.py create mode 100644 gceapi/tests/api/test_disks.py create mode 100644 gceapi/tests/api/test_fields.py create mode 100644 gceapi/tests/api/test_firewalls.py create mode 100644 gceapi/tests/api/test_images.py create mode 100644 gceapi/tests/api/test_instances.py create mode 100644 gceapi/tests/api/test_machine_types.py create mode 100644 gceapi/tests/api/test_networks.py create mode 100644 gceapi/tests/api/test_operations.py create mode 100644 gceapi/tests/api/test_projects.py create mode 100644 gceapi/tests/api/test_regions.py create mode 100644 gceapi/tests/api/test_routes.py create mode 100644 gceapi/tests/api/test_snapshots.py create mode 100644 gceapi/tests/api/test_zones.py create mode 100644 gceapi/tests/api/utils.py create mode 100644 gceapi/version.py create mode 100644 gceapi/wsgi.py create mode 100644 gceapi/wsgi_ext.py create mode 100644 openstack-common.conf create mode 100644 requirements.txt create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 test-requirements.txt create mode 100755 tools/db/schema_diff.py create mode 100755 tools/enable-pre-commit-hook.sh create mode 100644 tools/install_venv.py create mode 100644 tools/install_venv_common.py create mode 100755 tools/lintstack.py create mode 100755 tools/lintstack.sh create mode 100644 tools/patch_tox_venv.py create mode 100755 tools/regression_tester.py create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/.testr.conf b/.testr.conf new file mode 100644 index 0000000..968b110 --- /dev/null +++ b/.testr.conf @@ -0,0 +1,4 @@ +[DEFAULT] +test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ gceapi/tests $LISTOPT $IDOPTION +test_id_option=--load-list $IDFILE +test_list_option=--list diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 0000000..25c702f --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,43 @@ +Gceapi Style Commandments +========================= + +- Step 1: Read the OpenStack Style Commandments https://github.com/openstack-dev/hacking/blob/master/doc/source/index.rst +- Step 2: Read on + +Gceapi Specific Commandments +---------------------------- + +General +------- +- Do not use locals(). Example:: + + LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") % + locals()) # BAD + + LOG.debug(_("volume %(vol_name)s: creating size %(vol_size)sG") % + {'vol_name': vol_name, + 'vol_size': vol_size}) # OKAY + +- Use 'raise' instead of 'raise e' to preserve the original traceback of the exception being re-raised:: + + except Exception as e: + ... + raise e # BAD + + except Exception: + ... + raise # OKAY + + + +Creating Unit Tests +------------------- +For every new feature, unit tests should be created that both test and +(implicitly) document the usage of said feature. If submitting a patch for a +bug that had no unit test, a new passing unit test should be added. If a +submitted bug fix does have a unit test, be sure to add a new one that fails +without the patch and passes with the patch. + +For more information on creating unit tests and utilizing the testing +infrastructure in OpenStack Gceapi, please read gceapi/testing/README.rst.
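A minimal sketch of the kind of unit test the HACKING guidance above asks for. The test class, its helper, and the behaviour it checks are illustrative assumptions, not files in this patch; they mirror the default-description logic that gceapi/api/addresses.py introduces later in this change::

    import testtools


    class FloatingIpFormattingTest(testtools.TestCase):
        """Example: a formatted address always carries a 'description'
        key, defaulting to an empty string."""

        @staticmethod
        def _format(item):
            # Stand-in for the controller logic under test.
            return {"description": item.get("description", "")}

        def test_missing_description_defaults_to_empty(self):
            self.assertEqual("", self._format({})["description"])

        def test_existing_description_is_preserved(self):
            item = {"description": "reserved for gateway"}
            self.assertEqual("reserved for gateway",
                             self._format(item)["description"])

Such a test fails without the behaviour in place and passes with it, which is exactly the property the guideline above asks a bug-fix test to have.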
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..68c771a --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..45855b5 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,20 @@ +include run_tests.sh ChangeLog +include README.rst builddeb.sh +include MANIFEST.in pylintrc +include AUTHORS +include run_tests.py +include HACKING.rst +include LICENSE +include ChangeLog +include babel.cfg tox.ini +include openstack-common.conf +include gceapi/openstack/common/README +include gceapi/db/sqlalchemy/migrate_repo/README +include gceapi/db/sqlalchemy/migrate_repo/migrate.cfg +include gceapi/db/sqlalchemy/migrate_repo/versions/*.sql +graft doc +graft etc +graft gceapi/locale +graft gceapi/tests +graft tools +global-exclude *.pyc diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..6eeed6c --- /dev/null +++ b/README.rst @@ -0,0 +1,228 @@ +OpenStack Nova GCE API README +----------------------------- + +GCE API support for OpenStack. + +Usage +===== + +Download gcloud from Google and install it. +There are two ways to use it: + +1. Run the authorization command: + python google-cloud-sdk/platform/gcutil/gcutil --authorization_uri_base=http://localhost:8777 auth + and then any other command: + python google-cloud-sdk/platform/gcutil/gcutil --api_host=http://localhost:8777/ --authorization_uri_base=http://localhost:8777 --project demo listzones + +2. You must have a Google account. + You can activate an already-authorized account with + gcloud config set account + or authorize a new account with + gcloud auth login + Next you must authorize in OpenStack by running the authorization command: + gcutil --authorization_uri_base=http://localhost:8777 auth + and then you can run any other command: + gcutil --api_host=http://localhost:8777/ --authorization_uri_base=http://localhost:8777 --project demo listzones + +Make gcutil always use your GCE API endpoint via the '--api_host' flag and your GCE API +authorization endpoint via the '--authorization_uri_base' flag. You can also store these +settings in the "~/.gcutil.flags" file. + +If it doesn't work for some reason, check that your PYTHONPATH is exported and set correctly to something like ``/usr/lib/python2.7/dist-packages:/usr/local/lib/python2.7/dist-packages``. + +Limitations +=========== + +* Names are unique in GCE and are used for identification; names are not unique in Nova, so IDs are used there instead. +Solution: a GCE-managed OpenStack installation should also maintain unique naming. + +* GCE IDs are unsigned 64-bit integers (8 bytes). OpenStack IDs vary (int, string) but are mostly GUIDs (16 bytes). +Solution: since OpenStack IDs differ in length and nature, and since the GCE API currently never takes an ID as a parameter, +8-byte hashes are generated and returned for any reported ID. + +* GCE allows per-user SSH key specification, but Nova supports only one key. +Solution: Nova GCE API just uses the first key. + +Authentication specifics +======================== + +The GCE API uses OAuth2.0 for authentication. Because keystone lacks this protocol, a simple +but sufficient implementation of it was added to the GCE API service. +The current implementation allows one authenticated user to operate on several OpenStack +projects, as Google allows. To achieve this, the initial token returned during +authentication does not contain the project information that keystone requires. +Instead, authentication happens again with each request: the incoming project +information is added to the existing user info, and a new token is acquired from keystone.
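To make the 8-byte ID limitation above concrete, the sketch below shows one way such a mapping could look. The actual hash function used by the service is not shown in this excerpt, so md5 truncation here is purely an assumption for illustration::

    import hashlib
    import struct


    def gce_id_from_openstack_id(openstack_id):
        # Collapse an arbitrary OpenStack ID (int, string or UUID) onto
        # the unsigned 64-bit integer space that GCE clients expect.
        # Illustrative only: the concrete hash is an assumption.
        digest = hashlib.md5(str(openstack_id).encode("utf-8")).digest()
        return struct.unpack(">Q", digest[:8])[0]


    print(gce_id_from_openstack_id("8af2d7a5-0a0c-4f64-ad38-d3e30c46a0fb"))

Because the GCE API never takes an ID as a request parameter, the mapping only has to be deterministic, not reversible.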
+ +Supported Features +================== + +Standard query parameters are not supported, except for fields and prettyPrint. + +Supported resource types + +* Addresses +* Disks +* Firewalls +* Images +* Instances +* MachineTypes +* Networks +* Projects +* Regions +* Routes +* Snapshots +* Zones + +Unsupported resource types + +* ForwardingRules +* HttpHealthChecks +* TargetPools + +In the lists below: +"+" means supported +"-" means unsupported + ++Addresses + ++aggregatedList GET /project/aggregated/addresses ++delete DELETE /project/regions/region/addresses/address ++get GET /project/regions/region/addresses/address ++insert POST /project/regions/region/addresses ++list GET /project/regions/region/addresses + ++Disks + ++aggregatedList GET /project/aggregated/disks ++createSnapshot POST /project/zones/zone/disks/disk/createSnapshot ++delete DELETE /project/zones/zone/disks/disk ++get GET /project/zones/zone/disks/disk ++insert POST /project/zones/zone/disks ++list GET /project/zones/zone/disks + ++Firewalls + ++delete DELETE /project/global/firewalls/firewall ++get GET /project/global/firewalls/firewall ++insert POST /project/global/firewalls ++list GET /project/global/firewalls +-patch PATCH /project/global/firewalls/firewall +-update PUT /project/global/firewalls/firewall + +-ForwardingRules + +-aggregatedList GET /project/aggregated/forwardingRules +-delete DELETE /project/regions/region/forwardingRules/forwardingRule +-get GET /project/regions/region/forwardingRules/forwardingRule +-insert POST /project/regions/region/forwardingRules +-list GET /project/regions/region/forwardingRules +-setTarget POST /project/regions/region/forwardingRules/forwardingRule/setTarget + ++GlobalOperations + ++aggregatedList GET /project/aggregated/operations ++delete DELETE /project/global/operations/operation ++get GET /project/global/operations/operation ++list GET /project/global/operations + +-HttpHealthChecks + +-delete DELETE /project/global/httpHealthChecks/httpHealthCheck +-get GET /project/global/httpHealthChecks/httpHealthCheck +-insert POST /project/global/httpHealthChecks +-list GET /project/global/httpHealthChecks +-patch PATCH /project/global/httpHealthChecks/httpHealthCheck +-update PUT /project/global/httpHealthChecks/httpHealthCheck + ++Images + ++delete DELETE /project/global/images/image +-deprecate POST /project/global/images/image/deprecate ++get GET /project/global/images/image ++insert POST /project/global/images ++list GET /project/global/images + ++Instances + ++addAccessConfig POST /project/zones/zone/instances/instance/addAccessConfig ++aggregatedList GET /project/aggregated/instances ++attachDisk POST /project/zones/zone/instances/instance/attachDisk ++delete DELETE /project/zones/zone/instances/instance ++deleteAccessConfig POST /project/zones/zone/instances/instance/deleteAccessConfig ++detachDisk POST /project/zones/zone/instances/instance/detachDisk ++get GET /project/zones/zone/instances/instance +-getSerialPortOutput GET /project/zones/zone/instances/instance/serialPort ++insert POST /project/zones/zone/instances ++list GET /project/zones/zone/instances ++reset POST /project/zones/zone/instances/instance/reset +-setMetadata POST /project/zones/zone/instances/instance/setMetadata +-setTags POST /project/zones/zone/instances/instance/setTags +-setScheduling POST /project/zones/zone/instances/instance/setScheduling + ++MachineTypes + ++aggregatedList GET /project/aggregated/machineTypes ++get GET /project/zones/zone/machineTypes/machineType ++list GET /project/zones/zone/machineTypes + ++Networks + 
++delete DELETE /project/global/networks/network ++get GET /project/global/networks/network ++insert POST /project/global/networks ++list GET /project/global/networks + ++Projects + ++get GET /project ++setCommonInstanceMetadata POST /project/setCommonInstanceMetadata + ++RegionOperations + ++delete DELETE /project/regions/region/operations/operation ++get GET /project/regions/region/operations/operation ++list GET /project/regions/region/operations + ++Regions + ++get GET /project/regions/region ++list GET /project/regions + ++Routes + ++delete DELETE /project/global/routes/route ++get GET /project/global/routes/route ++insert POST /project/global/routes ++list GET /project/global/routes + ++Snapshots + ++delete DELETE /project/global/snapshots/snapshot ++get GET /project/global/snapshots/snapshot ++list GET /project/global/snapshots + +-TargetPools + +-addHealthCheck POST /project/regions/region/targetPools/targetPool/addHealthCheck +-addInstance POST /project/regions/region/targetPools/targetPool/addInstance +-aggregatedList GET /project/aggregated/targetPools +-delete DELETE /project/regions/region/targetPools/targetPool +-get GET /project/regions/region/targetPools/targetPool +-getHealth POST /project/regions/region/targetPools/targetPool/getHealth +-insert POST /project/regions/region/targetPools +-list GET /project/regions/region/targetPools +-removeHealthCheck POST /project/regions/region/targetPools/targetPool/removeHealthCheck +-removeInstance POST /project/regions/region/targetPools/targetPool/removeInstance +-setBackup POST /project/regions/region/targetPools/targetPool/setBackup + ++ZoneOperations + ++delete DELETE /project/zones/zone/operations/operation ++get GET /project/zones/zone/operations/operation ++list GET /project/zones/zone/operations + ++Zones + ++get GET /project/zones/zone ++list GET /project/zones + diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 0000000..efceab8 --- /dev/null +++ b/babel.cfg @@ -0,0 +1 @@ +[python: **.py] diff --git a/bin/gceapi-db-setup b/bin/gceapi-db-setup new file mode 100755 index 0000000..df4a432 --- /dev/null +++ b/bin/gceapi-db-setup @@ -0,0 +1,291 @@ +#!/bin/bash +# +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +# +# Print --help output and exit. +# +usage() { + +cat << EOF +Set up a local MySQL database for use with gceapi. +This script will create a 'gceapi' database that is accessible +only on localhost by user 'gceapi' with password 'gceapi'. + +Usage: gceapi-db-setup <rpm|deb> [options] +Options: + rpm|deb + Select the distro type (rpm or debian). + + --help | -h + Print usage information. + --password | -p + Specify the password for the 'gceapi' MySQL user that will be + used to connect to the 'gceapi' MySQL database. By default, + the password 'gceapi' will be used. + --rootpw | -r + Specify the root MySQL password. If the script installs + the MySQL server, it will set the root password to this value + instead of prompting for a password.
If the MySQL server is + already installed, this password will be used to connect to the + database instead of having to prompt for it. + --yes | -y + In cases where the script would normally ask for confirmation + before doing something, such as installing mysql-server, + just assume yes. This is useful if you want to run the script + non-interactively. +EOF + + exit 0 +} + +install_mysql_server() { + if [ -z "${ASSUME_YES}" ] ; then + $PACKAGE_INSTALL mysql-server + else + $PACKAGE_INSTALL -y mysql-server + fi +} + +start_mysql_server() { + $SERVICE_START +} + +MYSQL_GCEAPI_PW_DEFAULT="gceapi" +MYSQL_GCEAPI_PW=${MYSQL_GCEAPI_PW_DEFAULT} +GCEAPI_CONFIG="/etc/gceapi/gceapi.conf" +ASSUME_YES="" +ELEVATE="" + +# Check for root privileges +if [[ $EUID -ne 0 ]] ; then + echo "This operation requires superuser privileges, using sudo:" + if sudo -l > /dev/null ; then + ELEVATE="sudo" + else + exit 1 + fi +fi + +case "$1" in + rpm) + echo "Installing on an RPM system." + PACKAGE_INSTALL="$ELEVATE yum install" + PACKAGE_STATUS="rpm -q" + SERVICE_MYSQLD="mysqld" + SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" + SERVICE_STATUS="service $SERVICE_MYSQLD status" + SERVICE_ENABLE="$ELEVATE chkconfig" + ;; + deb) + echo "Installing on a Debian system." + PACKAGE_INSTALL="$ELEVATE apt-get install" + PACKAGE_STATUS="dpkg-query -s" + SERVICE_MYSQLD="mysql" + SERVICE_START="$ELEVATE service $SERVICE_MYSQLD start" + SERVICE_STATUS="$ELEVATE service $SERVICE_MYSQLD status" + SERVICE_ENABLE="" + ;; + *) + usage + ;; +esac + +while [ $# -gt 0 ] +do + case "$1" in + -h|--help) + usage + ;; + -p|--password) + shift + MYSQL_GCEAPI_PW=${1} + ;; + -r|--rootpw) + shift + MYSQL_ROOT_PW=${1} + ;; + -y|--yes) + ASSUME_YES="yes" + ;; + *) + # ignore + ;; + esac + shift +done + + +# Make sure MySQL is installed. + +NEW_MYSQL_INSTALL=0 +if ! $PACKAGE_STATUS mysql-server > /dev/null && ! $PACKAGE_STATUS mariadb-server > /dev/null +then + if [ -z "${ASSUME_YES}" ] ; then + printf "mysql-server is not installed. Would you like to install it now? (y/n): " + read response + case "$response" in + y|Y) + ;; + n|N) + echo "mysql-server must be installed. Please install it before proceeding." + exit 0 + ;; + *) + echo "Invalid response." + exit 1 + esac + fi + + NEW_MYSQL_INSTALL=1 + install_mysql_server +fi + + +# Make sure mysqld is running. + +if ! $SERVICE_STATUS > /dev/null +then + if [ -z "${ASSUME_YES}" ] ; then + printf "$SERVICE_MYSQLD is not running. Would you like to start it now? (y/n): " + read response + case "$response" in + y|Y) + ;; + n|N) + echo "$SERVICE_MYSQLD must be running. Please start it before proceeding." + exit 0 + ;; + *) + echo "Invalid response." + exit 1 + esac + fi + + start_mysql_server + + # If we both installed and started, ensure it starts at boot + # (skipped when no enable command exists for this distro) + [ $NEW_MYSQL_INSTALL -eq 1 ] && [ -n "$SERVICE_ENABLE" ] && $SERVICE_ENABLE $SERVICE_MYSQLD on +fi + + +# Get MySQL root access. + +if [ $NEW_MYSQL_INSTALL -eq 1 ] +then + if [ ! "${MYSQL_ROOT_PW+defined}" ] ; then + echo "Since this is a fresh installation of MySQL, please set a password for the 'root' mysql user." + + PW_MATCH=0 + while [ $PW_MATCH -eq 0 ] + do + printf "Enter new password for 'root' mysql user: " + read -s MYSQL_ROOT_PW + echo + printf "Enter new password again: " + read -s PW2 + echo + if [ "${MYSQL_ROOT_PW}" = "${PW2}" ] ; then + PW_MATCH=1 + else + echo "Passwords did not match."
+ fi + done + fi + + echo "UPDATE mysql.user SET password = password('${MYSQL_ROOT_PW}') WHERE user = 'root'; DELETE FROM mysql.user WHERE user = ''; flush privileges;" | mysql -u root + if ! [ $? -eq 0 ] ; then + echo "Failed to set password for 'root' MySQL user." + exit 1 + fi +elif [ ! "${MYSQL_ROOT_PW+defined}" ] ; then + printf "Please enter the password for the 'root' MySQL user: " + read -s MYSQL_ROOT_PW + echo +fi + + +# Sanity check MySQL credentials. + +MYSQL_ROOT_PW_ARG="" +if [ "${MYSQL_ROOT_PW+defined}" ] +then + MYSQL_ROOT_PW_ARG="--password=${MYSQL_ROOT_PW}" +fi +echo "SELECT 1;" | mysql -u root ${MYSQL_ROOT_PW_ARG} > /dev/null +if ! [ $? -eq 0 ] +then + echo "Failed to connect to the MySQL server. Please check your root user credentials." + exit 1 +fi +echo "Verified connectivity to MySQL." + + +# Now create the db. + +echo "Creating 'gceapi' database." +cat << EOF | mysql -u root ${MYSQL_ROOT_PW_ARG} +CREATE DATABASE IF NOT EXISTS gceapi; +GRANT ALL ON gceapi.* TO 'gceapi'@'localhost' IDENTIFIED BY '${MYSQL_GCEAPI_PW}'; +GRANT ALL ON gceapi.* TO 'gceapi'@'%' IDENTIFIED BY '${MYSQL_GCEAPI_PW}'; +flush privileges; +EOF + + +# Make sure gceapi configuration has the right MySQL password. + +if [ "${MYSQL_GCEAPI_PW}" != "${MYSQL_GCEAPI_PW_DEFAULT}" ] ; then + echo "Updating 'gceapi' database password in ${GCEAPI_CONFIG}" + sed -i -e "s/mysql:\/\/gceapi:\(.*\)@/mysql:\/\/gceapi:${MYSQL_GCEAPI_PW}@/" ${GCEAPI_CONFIG} +fi + +# Override the logging config for the db_sync run. +log_conf=$(mktemp /tmp/gceapi-logging.XXXXXXXXXX.conf) +cat << EOF > $log_conf +[loggers] +keys=root + +[handlers] +keys=consoleHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler + +[handler_consoleHandler] +class=StreamHandler +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(name)s - %(levelname)s - %(message)s +EOF + +gce-api-manage --log-config=$log_conf db_sync +rm $log_conf + +# Do a final sanity check on the database. + +echo "SELECT * FROM migrate_version;" | mysql -u gceapi --password=${MYSQL_GCEAPI_PW} gceapi > /dev/null +if ! [ $? -eq 0 ] +then + echo "Final sanity check failed." + exit 1 +fi + +echo "Complete!"
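Beyond the script's own final check, the provisioned credentials can also be verified from Python. This snippet assumes the MySQL-python driver (MySQLdb) is installed; it is illustrative only and not part of the patch::

    import MySQLdb  # from the MySQL-python package (assumed installed)

    conn = MySQLdb.connect(host="localhost", user="gceapi",
                           passwd="gceapi", db="gceapi")
    cursor = conn.cursor()
    # The migrate_version table is created by the db_sync step above.
    cursor.execute("SELECT version FROM migrate_version")
    print(cursor.fetchone())
    conn.close()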
diff --git a/etc/gceapi/api-paste.ini b/etc/gceapi/api-paste.ini new file mode 100644 index 0000000..e75a1cd --- /dev/null +++ b/etc/gceapi/api-paste.ini @@ -0,0 +1,48 @@ +########## +# Shared # +########## + +[filter:keystonecontext] +paste.filter_factory = gceapi.auth:GceapiKeystoneContext.factory + +[filter:authtoken] +paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory +auth_port = 35357 +auth_protocol = http +auth_version = v2.0 + +####### +# GCE # +####### +[composite:gce] +use = egg:Paste#urlmap +/: gceapi_oauth +/compute/v1/projects/: gceapi +/discovery/v1/apis/compute/: gceapi_discovery + +[composite:gceapi] +use = call:gceapi.auth:pipeline_factory +keystone = gceauthtoken authtoken keystonecontext gceexecutor + +[filter:gceauthtoken] +paste.filter_factory = gceapi.api.oauth:filter_factory + +[app:gceexecutor] +paste.app_factory = gceapi.api:APIRouter.factory + +[composite:gceapi_oauth] +use = call:gceapi.auth:pipeline_factory +noauth = gceexecutor_oauth +keystone = gceexecutor_oauth + +[app:gceexecutor_oauth] +paste.app_factory = gceapi.api:APIRouterOAuth.factory + +[composite:gceapi_discovery] +use = call:gceapi.auth:pipeline_factory +noauth = gceexecutor_discovery +keystone = gceexecutor_discovery + +[app:gceexecutor_discovery] +paste.app_factory = gceapi.api:APIRouterDiscovery.factory +#### \ No newline at end of file diff --git a/gceapi/__init__.py b/gceapi/__init__.py new file mode 100644 index 0000000..51e318a --- /dev/null +++ b/gceapi/__init__.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`gceapi` -- Cloud IaaS Platform +=================================== + +.. automodule:: gceapi + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +""" + +import gettext + + +gettext.install('gceapi', unicode=1) diff --git a/gceapi/api/__init__.py b/gceapi/api/__init__.py new file mode 100644 index 0000000..2072cbd --- /dev/null +++ b/gceapi/api/__init__.py @@ -0,0 +1,253 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from gceapi.api import addresses +from gceapi.api import discovery +from gceapi.api import disks +from gceapi.api import firewalls +from gceapi.api import images +from gceapi.api import instances +from gceapi.api import machine_types +from gceapi.api import networks +from gceapi.api import oauth +from gceapi.api import operations +from gceapi.api import projects +from gceapi.api import regions +from gceapi.api import routes +from gceapi.api import snapshots +from gceapi.api import zones +from gceapi.openstack.common import log as logging +from gceapi import wsgi +from gceapi import wsgi_ext as openstack_api + +gce_opts = [ + cfg.StrOpt('keystone_gce_url', + default='http://127.0.0.1:5000/v2.0', + help='Keystone URL'), + cfg.IntOpt('gce_port', + default=8777, + help='the port of the gce api server'), + cfg.StrOpt('gce_scheme', + default='http', + help='the protocol to use when connecting to the gce api ' + 'server (http, https)'), + cfg.StrOpt('gce_path', + default='/compute/v1beta15/projects', + help='the path prefix used to call the gce api server'), + cfg.StrOpt('public_network', + default='public', + help='name of public network'), + ] + +CONF = cfg.CONF +CONF.register_opts(gce_opts) + +LOG = logging.getLogger(__name__) + + +class APIRouter(wsgi.Router): + """ + Routes requests on the GCE API to the appropriate controller + and method. + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, `gceapi.wsgi.Router` doesn't have one.""" + + return cls() + + def __init__(self): + mapper = openstack_api.ProjectMapper() + self.resources = {} + self._setup_routes(mapper) + super(APIRouter, self).__init__(mapper) + + def _setup_routes(self, mapper): + mapper.redirect("", "/") + + self.resources['regions'] = regions.create_resource() + self.resources['firewalls'] = firewalls.create_resource() + self.resources['disks'] = disks.create_resource() + self.resources['machineTypes'] = machine_types.create_resource() + self.resources['instances'] = instances.create_resource() + self.resources['images'] = images.create_resource() + self.resources['zones'] = zones.create_resource() + self.resources['networks'] = networks.create_resource() + self.resources['projects'] = projects.create_resource() + self.resources['snapshots'] = snapshots.create_resource() + self.resources['addresses'] = addresses.create_resource() + self.resources['routes'] = routes.create_resource() + self.resources['operations'] = operations.create_resource() + + mapper.resource("disks", "zones/{scope_id}/disks", + controller=self.resources['disks']) + mapper.connect("/{project_id}/aggregated/disks", + controller=self.resources['disks'], + action="aggregated_list", + conditions={"method": ["GET"]}) + mapper.connect("/{project_id}/zones/{scope_id}/disks/{id}/" + "createSnapshot", + controller=self.resources['disks'], + action="create_snapshot", + conditions={"method": ["POST"]}) + + mapper.resource("machineTypes", "zones/{scope_id}/machineTypes", + controller=self.resources['machineTypes']) + mapper.connect("/{project_id}/aggregated/machineTypes", + controller=self.resources['machineTypes'], + action="aggregated_list", + conditions={"method": ["GET"]}) + + mapper.resource("instances", "zones/{scope_id}/instances", + controller=self.resources['instances']) + mapper.connect("/{project_id}/aggregated/instances", + 
controller=self.resources['instances'], + action="aggregated_list", + conditions={"method": ["GET"]}) + mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/" + "addAccessConfig", + controller=self.resources['instances'], + action="add_access_config", + conditions={"method": ["POST"]}) + mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/" + "deleteAccessConfig", + controller=self.resources['instances'], + action="delete_access_config", + conditions={"method": ["POST"]}) + mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/reset", + controller=self.resources['instances'], + action="reset_instance", + conditions={"method": ["POST"]}) + mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/" + "attachDisk", + controller=self.resources['instances'], + action="attach_disk", + conditions={"method": ["POST"]}) + mapper.connect("/{project_id}/zones/{scope_id}/instances/{id}/" + "detachDisk", + controller=self.resources['instances'], + action="detach_disk", + conditions={"method": ["POST"]}) + + mapper.resource("images", "global/images", + controller=self.resources['images']) + mapper.resource("regions", "regions", + controller=self.resources['regions']) + mapper.resource("zones", "zones", + controller=self.resources['zones']) + mapper.resource("networks", "global/networks", + controller=self.resources["networks"]) + mapper.resource("firewalls", "global/firewalls", + controller=self.resources["firewalls"]) + mapper.resource("routes", "global/routes", + controller=self.resources['routes']) + + mapper.connect("/{project_id}", controller=self.resources['projects'], + action="show", conditions={"method": ["GET"]}) + mapper.connect("/{project_id}/setCommonInstanceMetadata", + controller=self.resources['projects'], + action="set_common_instance_metadata", + conditions={"method": ["POST"]}) + + mapper.resource("addresses", "regions/{scope_id}/addresses", + controller=self.resources['addresses']) + mapper.connect("/{project_id}/aggregated/addresses", + controller=self.resources['addresses'], + action="aggregated_list", + conditions={"method": ["GET"]}) + + mapper.resource("snapshots", "global/snapshots", + controller=self.resources['snapshots']) + + mapper.resource("operations", "global/operations", + controller=self.resources['operations']) + mapper.resource("operations", "regions/{scope_id}/operations", + controller=self.resources['operations']) + mapper.resource("operations", "zones/{scope_id}/operations", + controller=self.resources['operations']) + mapper.connect("/{project_id}/aggregated/operations", + controller=self.resources['operations'], + action="aggregated_list", + conditions={"method": ["GET"]}) + + +class APIRouterOAuth(wsgi.Router): + """ + Routes OAuth2.0 requests to the appropriate controller + and method.
+ """ + + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, `gceapi.wsgi.Router` doesn't have one.""" + + return cls() + + def __init__(self): + mapper = openstack_api.ProjectMapper() + self.resources = {} + self._setup_routes(mapper) + super(APIRouterOAuth, self).__init__(mapper) + + def _setup_routes(self, mapper): + mapper.redirect("", "/") + + self.resources['oauth'] = oauth.create_resource() + + mapper.connect("/auth", + controller=self.resources['oauth'], + action="auth", + conditions={"method": ["GET"]}) + mapper.connect("/approval", + controller=self.resources['oauth'], + action="approval", + conditions={"method": ["POST"]}) + mapper.connect("/token", + controller=self.resources['oauth'], + action="token", + conditions={"method": ["POST"]}) + + +class APIRouterDiscovery(wsgi.Router): + """ + Routes requests on the GCE discovery API to the appropriate controller + and method. + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, `gceapi.wsgi.Router` doesn't have one.""" + + return cls() + + def __init__(self): + mapper = openstack_api.ProjectMapper() + self.resources = {} + self._setup_routes(mapper) + super(APIRouterDiscovery, self).__init__(mapper) + + def _setup_routes(self, mapper): + mapper.redirect("", "/") + + self.resources['discovery'] = discovery.create_resource() + + mapper.connect("/{version}/rest", + controller=self.resources['discovery'], + action="discovery", + conditions={"method": ["GET"]}) diff --git a/gceapi/api/address_api.py b/gceapi/api/address_api.py new file mode 100644 index 0000000..621da21 --- /dev/null +++ b/gceapi/api/address_api.py @@ -0,0 +1,26 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import address_neutron_api +from gceapi.api import address_nova_api +from gceapi.api import base_api + + +class API(base_api.API): + """GCE Address API.""" + + NEUTRON_API_MODULE = address_neutron_api + NOVA_API_MODULE = address_nova_api + + __metaclass__ = base_api.NetSingleton diff --git a/gceapi/api/address_neutron_api.py b/gceapi/api/address_neutron_api.py new file mode 100644 index 0000000..4a0543b --- /dev/null +++ b/gceapi/api/address_neutron_api.py @@ -0,0 +1,120 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo.config import cfg + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import network_api +from gceapi.api import operation_util +from gceapi.api import region_api +from gceapi.api import scopes +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + +CONF = cfg.CONF + + +class API(base_api.API): + """GCE Address API - neutron implementation.""" + + KIND = "address" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "name", "description"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._region_api = region_api.API() + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_scopes(self, context, item): + region = item["scope"] + if region is not None: + return [scopes.RegionScope(region)] + return self._region_api.get_items_as_scopes(context) + + def get_item(self, context, name, scope=None): + return self._get_floating_ips(context, scope, name)[0] + + def get_items(self, context, scope=None): + return self._get_floating_ips(context, scope) + + def delete_item(self, context, name, scope=None): + floating_ip = self._get_floating_ips(context, scope, name)[0] + operation_util.start_operation(context) + self._delete_db_item(context, floating_ip) + clients.neutron(context).delete_floatingip(floating_ip["id"]) + + def add_item(self, context, name, body, scope=None): + if any(x["name"] == name + for x in self._get_floating_ips(context, scope)): + raise exception.InvalidInput( + _("The resource '%s' already exists.") % name) + public_network_id = network_api.API().get_public_network_id(context) + operation_util.start_operation(context) + floating_ip = clients.neutron(context).create_floatingip( + {"floatingip": {"floating_network_id": public_network_id}}) + floating_ip = self._prepare_floating_ip( + clients.nova(context), floating_ip["floatingip"], scope) + floating_ip["name"] = body["name"] + if "description" in body: + floating_ip["description"] = body["description"] + floating_ip = self._add_db_item(context, floating_ip) + return floating_ip + + def _get_floating_ips(self, context, scope, name=None): + results = clients.neutron(context).list_floatingips( + tenant_id=context.project_id)["floatingips"] + gce_floating_ips = self._get_db_items_dict(context) + nova_client = clients.nova(context) + results = [self._prepare_floating_ip(nova_client, x, scope, + gce_floating_ips.get(x["id"])) + for x in results] + unnamed_ips = self._purge_db(context, results, gce_floating_ips) + self._add_nonnamed_items(context, unnamed_ips) + if name is None: + return results + + for item in results: + if item["name"] == name: + return [item] + + raise exception.NotFound + + def _prepare_floating_ip(self, nova_client, floating_ip, scope, + db_item=None): + floating_ip["scope"] = scope + fixed_ip_address = floating_ip.get("fixed_ip_address") + floating_ip["status"] = "IN USE" if fixed_ip_address else "RESERVED" + + if fixed_ip_address is not None: + instances = nova_client.servers.list( + search_opts={"fixed_ip": fixed_ip_address}) + if instances: + floating_ip["instance_name"] = instances[0].name + floating_ip["instance_zone"] = getattr( + instances[0], "OS-EXT-AZ:availability_zone") + + return self._prepare_item(floating_ip, db_item) + + def _add_nonnamed_items(self, context, items): + for item in items: + item["name"] = ("address-" + + item["floating_ip_address"].replace(".", "-")) + item["creationTimestamp"] = "" + 
self._add_db_item(context, item) diff --git a/gceapi/api/address_nova_api.py b/gceapi/api/address_nova_api.py new file mode 100644 index 0000000..6051bdc --- /dev/null +++ b/gceapi/api/address_nova_api.py @@ -0,0 +1,124 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_util +from gceapi.api import region_api +from gceapi.api import scopes +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + + +class API(base_api.API): + """GCE Address API - nova-network implementation.""" + + KIND = "address" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "name", "description"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._region_api = region_api.API() + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_scopes(self, context, item): + region = item["scope"] + if region is not None: + return [scopes.RegionScope(region)] + return self._region_api.get_items_as_scopes(context) + + def get_item(self, context, name, scope=None): + client = clients.nova(context) + return self._get_floating_ips(client, context, scope, name)[0] + + def get_items(self, context, scope=None): + client = clients.nova(context) + return self._get_floating_ips(client, context, scope) + + def delete_item(self, context, name, scope=None): + client = clients.nova(context) + floating_ip = self._get_floating_ips(client, context, scope, name)[0] + operation_util.start_operation(context) + self._delete_db_item(context, floating_ip) + client.floating_ips.delete(floating_ip["id"]) + + def add_item(self, context, name, body, scope=None): + client = clients.nova(context) + if any(x["name"] == name + for x in self._get_floating_ips(client, context, scope)): + raise exception.InvalidInput( + _("The resource '%s' already exists.") % name) + operation_util.start_operation(context) + result = client.floating_ips.create() + floating_ip = self._prepare_floating_ip(client, context, result, scope) + floating_ip["name"] = body["name"] + if "description" in body: + floating_ip["description"] = body["description"] + floating_ip = self._add_db_item(context, floating_ip) + return floating_ip + + def _get_floating_ips(self, client, context, scope, name=None): + results = client.floating_ips.list() + gce_floating_ips = self._get_db_items_dict(context) + results = [self._prepare_floating_ip( + client, context, x, scope, + gce_floating_ips.get(str(x.id))) + for x in results] + unnamed_ips = self._purge_db(context, results, gce_floating_ips) + self._add_nonnamed_items(context, unnamed_ips) + + if name is None: + return results + + for item in results: + if item["name"] == name: + return [item] + + raise exception.NotFound + + def _prepare_floating_ip(self, client, context, floating_ip, scope, + db_item=None): + floating_ip = 
utils.to_dict(floating_ip) + fixed_ip = floating_ip.get("fixed_ip") + # NOTE: read instance_id before floating_ip is rebuilt below, + # otherwise the lookup would always return None + instance_id = floating_ip.get("instance_id") + floating_ip = { + "fixed_ip_address": fixed_ip if fixed_ip else None, + "floating_ip_address": floating_ip["ip"], + "id": floating_ip["id"], + "port_id": None, + "tenant_id": context.project_id, + "scope": scope, + "status": "IN USE" if fixed_ip else "RESERVED", + } + + if instance_id is not None: + instance = client.servers.get(instance_id) + floating_ip["instance_name"] = instance.name + floating_ip["instance_zone"] = getattr( + instance, "OS-EXT-AZ:availability_zone") + + return self._prepare_item(floating_ip, db_item) + + def _add_nonnamed_items(self, context, items): + for item in items: + item["name"] = ("address-" + + item["floating_ip_address"].replace(".", "-")) + item["creationTimestamp"] = "" + self._add_db_item(context, item) diff --git a/gceapi/api/addresses.py b/gceapi/api/addresses.py new file mode 100644 index 0000000..90036e8 --- /dev/null +++ b/gceapi/api/addresses.py @@ -0,0 +1,49 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import address_api +from gceapi.api import common as gce_common +from gceapi.api import scopes +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Address controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(address_api.API(), + *args, **kwargs) + + def format_item(self, request, floating_ip, scope): + result_dict = { + "creationTimestamp": floating_ip.get("creationTimestamp", ""), + "status": floating_ip["status"], + "name": floating_ip["name"], + "address": floating_ip["floating_ip_address"], + } + if "description" in floating_ip: + result_dict["description"] = floating_ip["description"] + else: + result_dict["description"] = "" + + if "instance_name" in floating_ip: + result_dict["users"] = [self._qualify( + request, "instances", floating_ip["instance_name"], + scopes.ZoneScope(floating_ip["instance_zone"]))] + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/base_api.py b/gceapi/api/base_api.py new file mode 100644 index 0000000..f3db24b --- /dev/null +++ b/gceapi/api/base_api.py @@ -0,0 +1,213 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base classes of GCE API conversion layer.
+ +Classes in this layer aggregate functionality of OpenStack necessary +and sufficient to handle supported GCE API requests. +""" + +from oslo.config import cfg + +from gceapi import db +from gceapi import exception +from gceapi.openstack.common import timeutils + +FLAGS = cfg.CONF + + +class Singleton(type): + """Singleton metaclass. + + KIND must be overridden in classes based on this type. + """ + _instances = {} + KIND = "" + + def __call__(self, *args, **kwargs): + if not self.KIND: + raise NotImplementedError + if self.KIND not in self._instances: + singleton = super(Singleton, self).__call__(*args, **kwargs) + self._instances[self.KIND] = singleton + return self._instances[self.KIND] + + @classmethod + def get_instance(cls, kind): + """Get singleton by name.""" + + return cls._instances.get(kind) + + +class NetSingleton(Singleton): + """Proxy loader for network-dependent APIs. + + NEUTRON_API_MODULE and NOVA_API_MODULE must be overridden in classes + based on this type. + """ + + NEUTRON_API_MODULE = None + NOVA_API_MODULE = None + + def __call__(self): + net_api = FLAGS.get("network_api") + # NOTE(Alex): Initializing proper network singleton + if net_api is None or ("quantum" in net_api + or "neutron" in net_api): + return self.NEUTRON_API_MODULE.API() + else: + return self.NOVA_API_MODULE.API() + + +class API(object): + """Base GCE API abstraction class + + Inherited classes should implement one class of GCE API functionality. + There should be enough public methods implemented to cover necessary + methods of GCE API in the class. Other public methods can exist to be + invoked from other APIs of this layer. + Classes in this layer should use each other's functionality instead of + calling the corresponding low-level routines. + Basic methods should be named including "item(s)" instead of specific + functional names. + + Descendants are stateless singletons. + Supports callbacks for interaction of APIs in this layer. + """ + # TODO(Alex): Now action methods get the body of parameters straight from + # the GCE request while returning results in terms of OpenStack to be + # converted to GCE terms in the controller. In the next version this layer + # should be revised to work symmetrically with incoming and outgoing data. + + __metaclass__ = Singleton + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._callbacks = [] + + def _get_type(self): + """GCE API object type method. Should be overridden.""" + + raise NotImplementedError + + def _get_persistent_attributes(self): + """Iterable of names of columns stored in the GCE API database. + + Should be overridden. + """ + + raise NotImplementedError + + def get_item(self, context, name, scope=None): + """Returns a fully filled item for the particular inherited API.""" + + raise exception.NotFound + + def get_items(self, context, scope=None): + """Returns a list of items.""" + + return [] + + def delete_item(self, context, name, scope=None): + """Deletes an item.""" + + raise exception.NotFound + + def add_item(self, context, name, body, scope=None): + """Creates an item.
Returns the created item.""" + + raise exception.NotFound + + def get_scopes(self, context, item): + """Returns which zones/regions the item belongs to.""" + + return [] + + def _process_callbacks(self, context, reason, item, **kwargs): + for cb_reason, cb_func in self._callbacks: + if cb_reason == reason: + cb_func(context, item, **kwargs) + + def _register_callback(self, reason, func): + """Callback registration + + Callbacks can be registered by one API to be called by another before + some action, either to check that the action is possible or to run + pre-action processing. + """ + + self._callbacks.append((reason, func)) + + def _prepare_item(self, item, db_item): + if db_item is not None: + item.update(db_item) + return item + + def _add_db_item(self, context, item): + db_item = dict((key, item.get(key)) + for key in self._get_persistent_attributes() + if key in item) + if ("creationTimestamp" in self._get_persistent_attributes() and + "creationTimestamp" not in db_item): + # TODO(ft): Google does not return microseconds but does return + # the server time zone: 2013-12-06T03:34:31.340-08:00 + utcnow = timeutils.isotime(None, True) + db_item["creationTimestamp"] = utcnow + item["creationTimestamp"] = utcnow + db.add_item(context, self._get_type(), db_item) + return item + + def _delete_db_item(self, context, item): + return db.delete_item(context, self._get_type(), item["id"]) + + def _update_db_item(self, context, item): + db_item = dict((key, item.get(key)) + for key in self._get_persistent_attributes() + if key in item) + db.update_item(context, self._get_type(), db_item) + + def _get_db_items(self, context): + return db.get_items(context, self._get_type()) + + def _get_db_items_dict(self, context): + return dict((item["id"], item) for item in self._get_db_items(context)) + + def _get_db_item_by_id(self, context, item_id): + return db.get_item_by_id(context, self._get_type(), item_id) + + def _get_db_item_by_name(self, context, name): + return db.get_item_by_name(context, self._get_type(), name) + + def _purge_db(self, context, os_items, db_items_dict): + only_os_items = [] + existed_db_items = set() + for item in os_items: + db_item = db_items_dict.get(str(item["id"])) + if db_item is None: + only_os_items.append(item) + else: + existed_db_items.add(db_item["id"]) + for item in db_items_dict.itervalues(): + if item["id"] not in existed_db_items: + self._delete_db_item(context, item) + return only_os_items + + +class _CallbackReasons(object): + check_delete = 1 + pre_delete = 2 + post_add = 3 + + +_callback_reasons = _CallbackReasons() diff --git a/gceapi/api/clients.py b/gceapi/api/clients.py new file mode 100644 index 0000000..f0e153b --- /dev/null +++ b/gceapi/api/clients.py @@ -0,0 +1,141 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
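(Editorial illustration, not part of the patch.) The Singleton metaclass in base_api.py above keys instances by KIND, so constructing the same API class repeatedly yields one shared, stateless object; NetSingleton then picks the neutron or nova implementation behind that object at call time. A minimal sketch of the behaviour, assuming Python 2 (matching the __metaclass__ usage) and a hypothetical DummyAPI subclass:

    from gceapi.api import base_api

    class DummyAPI(base_api.API):
        KIND = "dummy"

        def _get_type(self):
            return self.KIND

    a = DummyAPI()
    b = DummyAPI()
    assert a is b  # one shared instance per KIND
    assert base_api.Singleton.get_instance("dummy") is a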
+ +from keystoneclient.v2_0 import client as kc +from novaclient import client as novaclient +from novaclient import shell as novashell +from oslo.config import cfg + +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +logger = logging.getLogger(__name__) + +CONF = cfg.CONF + + +try: + from neutronclient.v2_0 import client as neutronclient +except ImportError: + neutronclient = None + logger.info(_('neutronclient not available')) +try: + from cinderclient import client as cinderclient +except ImportError: + cinderclient = None + logger.info(_('cinderclient not available')) +try: + from glanceclient import client as glanceclient +except ImportError: + glanceclient = None + logger.info(_('glanceclient not available')) + + +def nova(context, service_type='compute'): + computeshell = novashell.OpenStackComputeShell() + extensions = computeshell._discover_extensions("1.1") + + args = { + 'project_id': context.project_id, + 'auth_url': CONF.keystone_gce_url, + 'service_type': service_type, + 'username': None, + 'api_key': None, + 'extensions': extensions, + } + + client = novaclient.Client(1.1, **args) + + management_url = _url_for(context, service_type=service_type) + client.client.auth_token = context.auth_token + client.client.management_url = management_url + + return client + + +def neutron(context): + if neutronclient is None: + return None + + args = { + 'auth_url': CONF.keystone_gce_url, + 'service_type': 'network', + 'token': context.auth_token, + 'endpoint_url': _url_for(context, service_type='network'), + } + + return neutronclient.Client(**args) + + +def glance(context): + if glanceclient is None: + return None + + args = { + 'auth_url': CONF.keystone_gce_url, + 'service_type': 'image', + 'token': context.auth_token, + } + + return glanceclient.Client( + "1", endpoint=_url_for(context, service_type='image'), **args) + + +def cinder(context): + if cinderclient is None: + return nova(context, 'volume') + + args = { + 'service_type': 'volume', + 'auth_url': CONF.keystone_gce_url, + 'username': None, + 'api_key': None, + } + + _cinder = cinderclient.Client('1', **args) + management_url = _url_for(context, service_type='volume') + _cinder.client.auth_token = context.auth_token + _cinder.client.management_url = management_url + + return _cinder + + +def keystone(context): + _keystone = kc.Client( + token=context.auth_token, + tenant_id=context.project_id, + auth_url=CONF.keystone_gce_url) + + return _keystone + + +def _url_for(context, **kwargs): + service_catalog = context.service_catalog + if not service_catalog: + catalog = keystone(context).service_catalog.catalog + service_catalog = catalog["serviceCatalog"] + context.service_catalog = service_catalog + + service_type = kwargs["service_type"] + for service in service_catalog: + if service["type"] != service_type: + continue + for endpoint in service["endpoints"]: + if "publicURL" in endpoint: + return endpoint["publicURL"] + else: + return None + + return None diff --git a/gceapi/api/common.py b/gceapi/api/common.py new file mode 100644 index 0000000..4efc0fb --- /dev/null +++ b/gceapi/api/common.py @@ -0,0 +1,389 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
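(Editorial illustration, not part of the patch.) _url_for() in clients.py above walks a Keystone v2-style service catalog and returns the first publicURL registered for the requested service type, or None when the type is absent. A toy catalog with hypothetical endpoint values:

    catalog = [{
        "type": "network",
        "endpoints": [{"publicURL": "http://neutron.example:9696"}],
    }]
    # With context.service_catalog set to this catalog,
    # _url_for(context, service_type="network") returns
    # "http://neutron.example:9696"; an unknown service_type returns None.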
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base GCE API controller""" + +import os.path +import re +from webob import exc + +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import scopes +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import timeutils + + +class Controller(object): + """Base controller + + Implements base CRUD methods. + Individual GCE controllers should inherit this and: + - implement the format_item() method, + - override the _get_type() method, + - add necessary specific request handlers, + - use _api to hold an instance of the related GCE API (see base_api.py). + """ + + _api = None + + # Initialization + def __init__(self, api): + """Base initialization. + + Inherited classes should init _api and call super(). + """ + + self._api = api + self._type_name = self._api._get_type() + self._collection_name = utils.get_collection_name(self._type_name) + self._type_kind = utils.get_type_kind(self._type_name) + self._list_kind = utils.get_list_kind(self._type_name) + self._aggregated_kind = utils.get_aggregated_kind(self._type_name) + self._operation_api = operation_api.API() + + def process_result(self, request, action, action_result): + context = self._get_context(request) + operation = operation_util.save_operaton(context, action_result) + if operation is not None: + scope = self._operation_api.get_scopes(context, operation)[0] + action_result = self._format_operation(request, operation, scope) + + if isinstance(action_result, Exception): + return self._format_error(action_result) + if action_result is None: + return None, 204 + return self._format_output(request, action, action_result), 200 + + # Base methods, should be overridden + + def format_item(self, request, image, scope): + """Main item resource conversion routine. + + Overrides in inherited classes should implement conversion of an + OpenStack resource into a GCE resource.
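+ Implementations typically build a result dict and return + self._format_item(request, result_dict, scope), as format_item() + in addresses.py above does.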
+ """ + + raise exc.HTTPNotImplemented + + # Actions + def index(self, req, scope_id=None): + """GCE list requests, global or with zone/region specified.""" + + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + + items = self._api.get_items(context, scope) + items = [{ + "scope": scope, + "item": self.format_item(req, i, scope) + } for i in items] + items = self._filter_items(req, items) + items, next_page_token = self._page_items(req, items) + items = [i["item"] for i in items] + + return self._format_list(req, items, next_page_token, scope) + + def show(self, req, id=None, scope_id=None): + """GCE get requests, global or zone/region specified.""" + + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + try: + item = self._api.get_item(context, id, scope) + return self.format_item(req, item, scope) + except (exception.NotFound, KeyError, IndexError): + msg = _("Resource '%s' could not be found") % id + raise exc.HTTPNotFound(explanation=msg) + + def aggregated_list(self, req): + """GCE aggregated list requests for all zones/regions.""" + + context = self._get_context(req) + items = list() + for item in self._api.get_items(context, None): + for scope in self._api.get_scopes(context, item): + items.append({ + "scope": scope, + "item": self.format_item(req, item, scope) + }) + items = self._filter_items(req, items) + items, next_page_token = self._page_items(req, items) + + items_by_scopes = {} + for item in items: + scope_path = item["scope"].get_path() + items_by_scope = items_by_scopes.setdefault(scope_path, + {self._collection_name: []})[self._collection_name] + items_by_scope.append(item["item"]) + + return self._format_list(req, items_by_scopes, next_page_token, + scopes.AggregatedScope()) + + def delete(self, req, id, scope_id=None): + """GCE delete requests.""" + + scope = self._get_scope(req, scope_id) + context = self._get_context(req) + operation_util.init_operation(context, "delete", + self._type_name, id, scope) + try: + self._api.delete_item(context, id, scope) + except (exception.NotFound, KeyError, IndexError): + msg = _("Resource '%s' could not be found") % id + raise exc.HTTPNotFound(explanation=msg) + + def create(self, req, body, scope_id=None): + """GCE add requests.""" + + scope = self._get_scope(req, scope_id) + context = self._get_context(req) + operation_util.init_operation(context, "insert", + self._type_name, body["name"], scope) + self._api.add_item(context, body['name'], body, scope) + + # Filtering + def _filter_items(self, req, items): + """Filtering result list + + Only one filter is supported(eg. 
by one field). + Only two comparison strings are supported: 'eq' and 'ne'. + There are no logical expressions with fields. + """ + if not items: + return items + if "filter" not in req.params: + return items + + filter_def = req.params["filter"].split() + if len(filter_def) != 3: + # TODO(apavlov): raise exception + return items + if filter_def[1] != "eq" and filter_def[1] != "ne": + # TODO(apavlov): raise exception + return items + if filter_def[0] not in items[0]["item"]: + # TODO(apavlov): raise exception + return items + + filter_field = filter_def[0] + filter_cmp = filter_def[1] == "eq" + filter_pattern = filter_def[2] + if filter_pattern[0] == "'" and filter_pattern[-1] == "'": + filter_pattern = filter_pattern[1:-1] + + result_list = list() + for item in items: + field = item["item"][filter_field] + result = re.match(filter_pattern, field) + if filter_cmp != (result is None): + result_list.append(item) + + return result_list + + # Paging + def _page_items(self, req, items): + if not items: + return items, None + if "maxResults" not in req.params: + return items, None + + limit = int(req.params["maxResults"]) + if limit >= len(items): + return items, None + + page_index = int(req.params.get("pageToken", 0)) + if page_index < 0 or page_index * limit > len(items): + # TODO(apavlov): raise exception + return [], None + + items.sort(None, lambda x: x["item"].get("name")) + start = limit * page_index + if start + limit >= len(items): + return items[start:], None + + return items[start:start + limit], str(page_index + 1) + + # Utility + def _get_context(self, req): + return req.environ['gceapi.context'] + + def _get_scope(self, req, scope_id): + scope = scopes.construct_from_path(req.path_info, scope_id) + if scope is None: + return None + scope_api = scope.get_scope_api() + if scope_api is not None: + try: + context = self._get_context(req) + scope_api.get_item(context, scope.get_name(), None) + except ValueError as ex: + raise exc.HTTPNotFound(detail=ex) + + return scope + + # Result formatting + def _format_date(self, date_string): + """Returns the standard format for a given date.""" + if date_string is None: + return None + if isinstance(date_string, basestring): + date_string = timeutils.parse_isotime(date_string) + return date_string.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_id(self, link): + hashed_link = hash(link) + if hashed_link < 0: + hashed_link = -hashed_link + return str(hashed_link) + + def _qualify(self, request, controller, identifier, scope): + """Creates a fully qualified selfLink for an item or collection. + + Specific formatting for projects and zones/regions: + 'global' prefix for global resources, + 'zones/zone_id' prefix for zone resources (similar for regions).
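+ For example (illustrative values): _qualify(request, "addresses", + "my-ip", scopes.RegionScope("region-one")) produces + <application_url>/<project>/regions/region-one/addresses/my-ip.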
+ """ + + result = os.path.join( + request.application_url, self._get_context(request).project_name) + if controller: + if scope: + result = os.path.join(result, scope.get_path()) + result = os.path.join(result, controller) + if identifier: + result = os.path.join(result, identifier) + return result + + def _format_item(self, request, result_dict, scope): + return self._add_item_header(request, result_dict, scope, + self._type_kind, self._collection_name) + + def _format_operation(self, request, operation, scope): + result_dict = { + "name": operation["name"], + "operationType": operation["type"], + "insertTime": operation["insert_time"], + "startTime": operation["start_time"], + "status": operation["status"], + "progress": operation["progress"], + "user": operation["user"], + } + result_dict["targetLink"] = self._qualify( + request, utils.get_collection_name(operation["target_type"]), + operation["target_name"], scope) + result_dict["targetId"] = self._get_id(result_dict["targetLink"]) + if "end_time" in operation: + result_dict["endTime"] = operation["end_time"] + if "error_code" in operation: + result_dict.update({ + "httpErrorStatusCode": operation["error_code"], + "httpErrorMessage": operation["error_message"], + "error": {"errors": operation["errors"]}, + }) + type_name = self._operation_api._get_type() + return self._add_item_header(request, result_dict, scope, + utils.get_type_kind(type_name), + utils.get_collection_name(type_name)) + + def _add_item_header(self, request, result_dict, scope, + _type_kind, _collection_name): + if scope is not None and scope.get_name() is not None: + result_dict[scope.get_type()] = self._qualify( + request, scope.get_collection(), scope.get_name(), None) + result_dict["kind"] = _type_kind + result_dict["selfLink"] = self._qualify( + request, _collection_name, result_dict.get("name"), scope) + result_dict["id"] = self._get_id(result_dict["selfLink"]) + return result_dict + + def _format_list(self, request, result_list, next_page_token, scope): + result_dict = {} + result_dict["items"] = result_list + if next_page_token: + result_dict["nextPageToken"] = next_page_token + result_dict["kind"] = (self._aggregated_kind + if scope and isinstance(scope, scopes.AggregatedScope) + else self._list_kind) + + context = self._get_context(request) + list_id = os.path.join("projects", context.project_name) + if scope: + list_id = os.path.join(list_id, scope.get_path()) + list_id = os.path.join(list_id, self._collection_name) + result_dict["id"] = list_id + + result_dict["selfLink"] = self._qualify( + request, self._collection_name, None, scope) + return result_dict + + def _format_error(self, ex_value): + if isinstance(ex_value, exception.NotAuthorized): + msg = _('Unauthorized') + code = 401 + elif isinstance(ex_value, exc.HTTPException): + msg = ex_value.explanation + code = ex_value.code + elif isinstance(ex_value, exception.GceapiException): + msg = ex_value.args[0] + code = ex_value.code + else: + msg = _('Internal server error') + code = 500 + + return { + 'error': {'errors': [{'message': msg}]}, + 'code': code, + 'message': msg + }, code + + def _format_output(self, request, action, action_result): + # TODO(ft): this metod must be safe and ignore unknown fields + fields = request.params.get('fields', None) + # TODO(ft): GCE can also format results of other action + if action not in ('index', 'show') or fields is None: + return action_result + + if action == 'show': + action_result = utils.apply_template(fields, action_result) + return action_result + sp = 
utils.split_by_comma(fields) + top_level = [] + items = [] + for string in sp: + if 'items' in string: + items.append(string) + else: + top_level.append(string) + res = {} + if len(items) > 0: + res['items'] = [] + for string in top_level: + dct = utils.apply_template(string, action_result) + for key, val in dct.items(): + res[key] = val + for string in items: + if '(' in string: + dct = utils.apply_template(string, action_result) + for key, val in dct.items(): + res[key] = val + elif string.startswith('items/'): + string = string[len('items/'):] + for element in action_result['items']: + dct = utils.apply_template(string, element) + res['items'].append(dct) + + return res diff --git a/gceapi/api/compute/v1.json b/gceapi/api/compute/v1.json new file mode 100644 index 0000000..392b9e8 --- /dev/null +++ b/gceapi/api/compute/v1.json @@ -0,0 +1,6734 @@ +{ + "kind": "discovery#restDescription", + "discoveryVersion": "v1", + "id": "compute:v1", + "name": "compute", + "version": "v1", + "revision": "20131120", + "title": "Compute Engine API", + "description": "API for the Google Compute Engine service.", + "ownerDomain": "google.com", + "ownerName": "Google", + "icons": { + "x16": "http://www.google.com/images/icons/product/compute_engine-16.png", + "x32": "http://www.google.com/images/icons/product/compute_engine-32.png" + }, + "documentationLink": "https://developers.google.com/compute/docs/reference/v1", + "protocol": "rest", + "baseUrl": "{HOST_URL}/compute/v1/projects/", + "basePath": "/compute/v1/projects/", + "rootUrl": "{HOST_URL}/", + "servicePath": "compute/v1/projects/", + "batchPath": "batch", + "parameters": { + "alt": { + "type": "string", + "description": "Data format for the response.", + "default": "json", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query" + }, + "fields": { + "type": "string", + "description": "Selector specifying which fields to include in a partial response.", + "location": "query" + }, + "key": { + "type": "string", + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query" + }, + "oauth_token": { + "type": "string", + "description": "OAuth 2.0 token for the current user.", + "location": "query" + }, + "prettyPrint": { + "type": "boolean", + "description": "Returns response with indentations and line breaks.", + "default": "true", + "location": "query" + }, + "quotaUser": { + "type": "string", + "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.", + "location": "query" + }, + "userIp": { + "type": "string", + "description": "IP address of the site where the request originates. 
Use this if you want to enforce per-user limits.", + "location": "query" + } + }, + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/compute": { + "description": "View and manage your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/compute.readonly": { + "description": "View your Google Compute Engine resources" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "schemas": { + "AccessConfig": { + "id": "AccessConfig", + "type": "object", + "description": "An access configuration attached to an instance's network interface.", + "properties": { + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#accessConfig" + }, + "name": { + "type": "string", + "description": "Name of this access configuration." + }, + "natIP": { + "type": "string", + "description": "An external IP address associated with this instance. Specify an unused static IP address available to the project. If not specified, the external IP will be drawn from a shared ephemeral pool." + }, + "type": { + "type": "string", + "description": "Type of configuration. Must be set to \"ONE_TO_ONE_NAT\". This configures port-for-port NAT to the internet.", + "default": "ONE_TO_ONE_NAT", + "enum": [ + "ONE_TO_ONE_NAT" + ], + "enumDescriptions": [ + "" + ] + } + } + }, + "Address": { + "id": "Address", + "type": "object", + "description": "A reserved address resource.", + "properties": { + "address": { + "type": "string", + "description": "The IP address represented by this resource." + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#address" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.addresses.insert" + ] + } + }, + "region": { + "type": "string", + "description": "URL of the region where the address resides (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." 
+ }, + "status": { + "type": "string", + "description": "The status of the address (output only).", + "enum": [ + "IN_USE", + "RESERVED" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "users": { + "type": "array", + "description": "The resources that are using this address resource.", + "items": { + "type": "string" + } + } + } + }, + "AddressAggregatedList": { + "id": "AddressAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped address lists.", + "additionalProperties": { + "$ref": "AddressesScopedList", + "description": "Name of the scope containing this set of addresses." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#addressAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "AddressList": { + "id": "AddressList", + "type": "object", + "description": "Contains a list of address resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The address resources.", + "items": { + "$ref": "Address" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#addressList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "AddressesScopedList": { + "id": "AddressesScopedList", + "type": "object", + "properties": { + "addresses": { + "type": "array", + "description": "List of addresses contained in this scope.", + "items": { + "$ref": "Address" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "AttachedDisk": { + "id": "AttachedDisk", + "type": "object", + "description": "An instance-attached disk resource.", + "properties": { + "boot": { + "type": "boolean", + "description": "Indicates that this is a boot disk. 
VM will use the first partition of the disk for its root filesystem." + }, + "deviceName": { + "type": "string", + "description": "Persistent disk only; must be unique within the instance when specified. This represents a unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, a default will be chosen by the system." + }, + "index": { + "type": "integer", + "description": "A zero-based index to assign to this disk, where 0 is reserved for the boot disk. If not specified, the server will choose an appropriate value (output only).", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#attachedDisk" + }, + "mode": { + "type": "string", + "description": "The mode in which to attach this disk, either \"READ_WRITE\" or \"READ_ONLY\".", + "enum": [ + "READ_ONLY", + "READ_WRITE" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "source": { + "type": "string", + "description": "Persistent disk only; the URL of the persistent disk resource." + }, + "type": { + "type": "string", + "description": "Type of the disk, either \"SCRATCH\" or \"PERSISTENT\". Note that persistent disks must be created before you can specify them here.", + "enum": [ + "PERSISTENT", + "SCRATCH" + ], + "enumDescriptions": [ + "", + "" + ], + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + } + } + }, + "DeprecationStatus": { + "id": "DeprecationStatus", + "type": "object", + "description": "Deprecation status for a public resource.", + "properties": { + "deleted": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED." + }, + "deprecated": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED." + }, + "obsolete": { + "type": "string", + "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE." + }, + "replacement": { + "type": "string", + "description": "A URL of the suggested replacement for the deprecated resource. The deprecated resource and its replacement must be resources of the same kind." + }, + "state": { + "type": "string", + "description": "The deprecation state. Can be \"DEPRECATED\", \"OBSOLETE\", or \"DELETED\". Operations which create a new resource using a \"DEPRECATED\" resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. New uses of \"OBSOLETE\" or \"DELETED\" resources will result in an error.", + "enum": [ + "DELETED", + "DEPRECATED", + "OBSOLETE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "Disk": { + "id": "Disk", + "type": "object", + "description": "A persistent disk resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." 
+ }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#disk" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.disks.insert" + ] + } + }, + "options": { + "type": "string", + "description": "Internal use only." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "sizeGb": { + "type": "string", + "description": "Size of the persistent disk, specified in GB. This parameter is optional when creating a disk from a disk image or a snapshot, otherwise it is required.", + "format": "int64" + }, + "sourceSnapshot": { + "type": "string", + "description": "The source snapshot used to create this disk. Once the source snapshot has been deleted from the system, this field will be cleared, and will not be set even if a snapshot with the same name has been re-created." + }, + "sourceSnapshotId": { + "type": "string", + "description": "The 'id' value of the snapshot used to create this disk. This value may be used to determine whether the disk was created from the current or a previous instance of a given disk snapshot." + }, + "status": { + "type": "string", + "description": "The status of disk creation (output only).", + "enum": [ + "CREATING", + "FAILED", + "READY", + "RESTORING" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "zone": { + "type": "string", + "description": "URL of the zone where the disk resides (output only)." + } + } + }, + "DiskAggregatedList": { + "id": "DiskAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped disk lists.", + "additionalProperties": { + "$ref": "DisksScopedList", + "description": "Name of the scope containing this set of disks." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#diskAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "DiskList": { + "id": "DiskList", + "type": "object", + "description": "Contains a list of persistent disk resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The persistent disk resources.", + "items": { + "$ref": "Disk" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#diskList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "DisksScopedList": { + "id": "DisksScopedList", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "List of disks contained in this scope.", + "items": { + "$ref": "Disk" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of disks when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "Firewall": { + "id": "Firewall", + "type": "object", + "description": "A firewall resource.", + "properties": { + "allowed": { + "type": "array", + "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "items": { + "type": "object", + "properties": { + "IPProtocol": { + "type": "string", + "description": "Required; this is the IP protocol that is allowed for this rule. This can either be one of the following well known protocol strings [\"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"sctp\"], or the IP protocol number." + }, + "ports": { + "type": "array", + "description": "An optional list of ports which are allowed. It is an error to specify this for any protocol that isn't UDP or TCP. Each entry must be either an integer or a range. If not specified, connections through any port are allowed.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"] and [\"12345-12349\"].", + "items": { + "type": "string" + } + } + } + } + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#firewall" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network to which this firewall is applied; provided by the client when the firewall is created.", + "annotations": { + "required": [ + "compute.firewalls.insert", + "compute.firewalls.patch" + ] + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "sourceRanges": { + "type": "array", + "description": "A list of IP address blocks expressed in CIDR format which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.", + "items": { + "type": "string" + } + }, + "sourceTags": { + "type": "array", + "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.", + "items": { + "type": "string" + } + }, + "targetTags": { + "type": "array", + "description": "A list of instance tags indicating sets of instances located on network which may make network connections as specified in allowed. If no targetTags are specified, the firewall rule applies to all instances on the specified network.", + "items": { + "type": "string" + } + } + } + }, + "FirewallList": { + "id": "FirewallList", + "type": "object", + "description": "Contains a list of firewall resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The firewall resources.", + "items": { + "$ref": "Firewall" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#firewallList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "ForwardingRule": { + "id": "ForwardingRule", + "type": "object", + "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target VMs to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple.", + "properties": { + "IPAddress": { + "type": "string", + "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. The address resource must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP will be assigned." + }, + "IPProtocol": { + "type": "string", + "description": "The IP protocol to which this rule applies, valid options are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'", + "enum": [ + "AH", + "ESP", + "SCTP", + "TCP", + "UDP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." 
+ }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#forwardingRule" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "portRange": { + "type": "string", + "description": "Applicable only when 'IPProtocol' is 'TCP', 'UDP' or 'SCTP', only packets addressed to ports in the specified range will be forwarded to 'target'. If 'portRange' is left empty (default value), all ports are forwarded. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges. @pattern: \\d+(?:-\\d+)?" + }, + "region": { + "type": "string", + "description": "URL of the region where the forwarding rule resides (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "target": { + "type": "string", + "description": "The URL of the target resource to receive the matched traffic. It must live in the same region as this forwarding rule." + } + } + }, + "ForwardingRuleAggregatedList": { + "id": "ForwardingRuleAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped forwarding rule lists.", + "additionalProperties": { + "$ref": "ForwardingRulesScopedList", + "description": "Name of the scope containing this set of addresses." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#forwardingRuleAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "ForwardingRuleList": { + "id": "ForwardingRuleList", + "type": "object", + "description": "Contains a list of ForwardingRule resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The ForwardingRule resources.", + "items": { + "$ref": "ForwardingRule" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#forwardingRuleList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "ForwardingRulesScopedList": { + "id": "ForwardingRulesScopedList", + "type": "object", + "properties": { + "forwardingRules": { + "type": "array", + "description": "List of forwarding rules contained in this scope.", + "items": { + "$ref": "ForwardingRule" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of forwarding rules when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "HealthCheckReference": { + "id": "HealthCheckReference", + "type": "object", + "properties": { + "healthCheck": { + "type": "string" + } + } + }, + "HealthStatus": { + "id": "HealthStatus", + "type": "object", + "properties": { + "healthState": { + "type": "string", + "description": "Health state of the instance.", + "enum": [ + "HEALTHY", + "UNHEALTHY" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "instance": { + "type": "string", + "description": "URL of the instance resource." + }, + "ipAddress": { + "type": "string", + "description": "The IP address represented by this resource." + } + } + }, + "HttpHealthCheck": { + "id": "HttpHealthCheck", + "type": "object", + "description": "An HttpHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTP.", + "properties": { + "checkIntervalSec": { + "type": "integer", + "description": "How often (in seconds) to send a health check. The default value is 5 seconds.", + "format": "int32" + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "healthyThreshold": { + "type": "integer", + "description": "A so-far unhealthy VM will be marked healthy after this many consecutive successes. The default value is 2.", + "format": "int32" + }, + "host": { + "type": "string", + "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#httpHealthCheck" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. 
The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "port": { + "type": "integer", + "description": "The TCP port number for the HTTP health check request. The default value is 80.", + "format": "int32" + }, + "requestPath": { + "type": "string", + "description": "The request path of the HTTP health check request. The default value is \"/\"." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "timeoutSec": { + "type": "integer", + "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds.", + "format": "int32" + }, + "unhealthyThreshold": { + "type": "integer", + "description": "A so-far healthy VM will be marked unhealthy after this many consecutive failures. The default value is 2.", + "format": "int32" + } + } + }, + "HttpHealthCheckList": { + "id": "HttpHealthCheckList", + "type": "object", + "description": "Contains a list of HttpHealthCheck resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The HttpHealthCheck resources.", + "items": { + "$ref": "HttpHealthCheck" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#httpHealthCheckList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "Image": { + "id": "Image", + "type": "object", + "description": "A disk image resource.", + "properties": { + "archiveSizeBytes": { + "type": "string", + "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).", + "format": "int64" + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this image." + }, + "description": { + "type": "string", + "description": "Textual description of the resource; provided by the client when the resource is created." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#image" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "rawDisk": { + "type": "object", + "description": "The raw disk image parameters.", + "properties": { + "containerType": { + "type": "string", + "description": "The format used to encode and transmit the block device. Should be TAR. This is just a container and transmission format and not a runtime format. 
Provided by the client when the disk image is created.", + "default": "TAR" + }, + "sha1Checksum": { + "type": "string", + "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.", + "pattern": "[a-f0-9]{40}" + }, + "source": { + "type": "string", + "description": "The full Google Cloud Storage URL where the disk image is stored; provided by the client when the disk image is created.", + "annotations": { + "required": [ + "compute.images.insert" + ] + } + } + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "sourceType": { + "type": "string", + "description": "Must be \"RAW\"; provided by the client when the disk image is created.", + "default": "RAW", + "enum": [ + "RAW" + ], + "enumDescriptions": [ + "" + ], + "annotations": { + "required": [ + "compute.images.insert" + ] + } + }, + "status": { + "type": "string", + "description": "Status of the image (output only). It will be one of the following: READY - after the image has been successfully created and is ready for use; FAILED - if creating the image fails for some reason; PENDING - the image creation is in progress. An image can be used to create other resources such as instances only after the image has been successfully created and the status is set to READY.", + "enum": [ + "FAILED", + "PENDING", + "READY" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "ImageList": { + "id": "ImageList", + "type": "object", + "description": "Contains a list of disk image resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The disk image resources.", + "items": { + "$ref": "Image" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#imageList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "Instance": { + "id": "Instance", + "type": "object", + "description": "An instance resource.", + "properties": { + "canIpForward": { + "type": "boolean", + "description": "Allows this instance to send packets with source IP addresses other than its own and receive packets with destination IP addresses other than its own. If this instance will be used as an IP gateway or it will be set as the next-hop in a Route resource, set this to true. If unsure, leave this set to false." + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "disks": { + "type": "array", + "description": "Array of disks associated with this instance.
Persistent disks must be created before you can assign them.", + "items": { + "$ref": "AttachedDisk" + } + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#instance" + }, + "machineType": { + "type": "string", + "description": "URL of the machine type resource describing which machine type to use to host the instance; provided by the client when the instance is created.", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "metadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs assigned to this instance. Consists of custom metadata or predefined keys; see Instance documentation for more information." + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "networkInterfaces": { + "type": "array", + "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "NetworkInterface" + } + }, + "scheduling": { + "$ref": "Scheduling", + "description": "Scheduling options for this instance." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + }, + "serviceAccounts": { + "type": "array", + "description": "A list of service accounts each with specified scopes, for which access tokens are to be made available to the instance through metadata queries.", + "items": { + "$ref": "ServiceAccount" + } + }, + "status": { + "type": "string", + "description": "Instance status. One of the following values: \"PROVISIONING\", \"STAGING\", \"RUNNING\", \"STOPPING\", \"STOPPED\", \"TERMINATED\" (output only).", + "enum": [ + "PROVISIONING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "TERMINATED" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "An optional, human-readable explanation of the status (output only)." + }, + "tags": { + "$ref": "Tags", + "description": "A list of tags to be applied to this instance. Used to identify valid sources or targets for network firewalls. Provided by the client on instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035." + }, + "zone": { + "type": "string", + "description": "URL of the zone where the instance resides (output only)." + } + } + }, + "InstanceAggregatedList": { + "id": "InstanceAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped instance lists.", + "additionalProperties": { + "$ref": "InstancesScopedList", + "description": "Name of the scope containing this set of instances." 
+ } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#instanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "InstanceList": { + "id": "InstanceList", + "type": "object", + "description": "Contains a list of instance resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "A list of instance resources.", + "items": { + "$ref": "Instance" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#instanceList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "InstanceReference": { + "id": "InstanceReference", + "type": "object", + "properties": { + "instance": { + "type": "string" + } + } + }, + "InstancesScopedList": { + "id": "InstancesScopedList", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "List of instances contained in this scope.", + "items": { + "$ref": "Instance" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of instances when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "MachineType": { + "id": "MachineType", + "type": "object", + "description": "A machine type resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this machine type." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource." 
+ }, + "guestCpus": { + "type": "integer", + "description": "Count of CPUs exposed to the instance.", + "format": "int32" + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "imageSpaceGb": { + "type": "integer", + "description": "Space allotted for the image, defined in GB.", + "format": "int32" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#machineType" + }, + "maximumPersistentDisks": { + "type": "integer", + "description": "Maximum persistent disks allowed.", + "format": "int32" + }, + "maximumPersistentDisksSizeGb": { + "type": "string", + "description": "Maximum total persistent disks size (GB) allowed.", + "format": "int64" + }, + "memoryMb": { + "type": "integer", + "description": "Physical memory assigned to the instance, defined in MB.", + "format": "int32" + }, + "name": { + "type": "string", + "description": "Name of the resource.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "scratchDisks": { + "type": "array", + "description": "List of extended scratch disks assigned to the instance.", + "items": { + "type": "object", + "properties": { + "diskGb": { + "type": "integer", + "description": "Size of the scratch disk, defined in GB.", + "format": "int32" + } + } + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "zone": { + "type": "string", + "description": "Url of the zone where the machine type resides (output only)." + } + } + }, + "MachineTypeAggregatedList": { + "id": "MachineTypeAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped machine type lists.", + "additionalProperties": { + "$ref": "MachineTypesScopedList", + "description": "Name of the scope containing this set of machine types." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#machineTypeAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "MachineTypeList": { + "id": "MachineTypeList", + "type": "object", + "description": "Contains a list of machine type resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The machine type resources.", + "items": { + "$ref": "MachineType" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#machineTypeList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "MachineTypesScopedList": { + "id": "MachineTypesScopedList", + "type": "object", + "properties": { + "machineTypes": { + "type": "array", + "description": "List of machine types contained in this scope.", + "items": { + "$ref": "MachineType" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of machine types when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "Metadata": { + "id": "Metadata", + "type": "object", + "description": "A metadata key/value entry.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the metadata's contents. This field is used for optimistic locking. An up-to-date metadata fingerprint must be provided in order to modify metadata.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.", + "pattern": "[a-zA-Z0-9-_]{1,128}", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + }, + "value": { + "type": "string", + "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.", + "annotations": { + "required": [ + "compute.instances.insert", + "compute.projects.setCommonInstanceMetadata" + ] + } + } + } + } + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#metadata" + } + } + }, + "Network": { + "id": "Network", + "type": "object", + "description": "A network resource.", + "properties": { + "IPv4Range": { + "type": "string", + "description": "Required; The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. 
Provided by the client when the network is created.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}", + "annotations": { + "required": [ + "compute.networks.insert" + ] + } + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "gatewayIPv4": { + "type": "string", + "description": "An optional address that is used for default routing to other networks. This must be within the range specified by IPv4Range, and is typically the first usable address in that range. If not specified, the default value is the first usable address in IPv4Range.", + "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}" + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#network" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.networks.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + } + } + }, + "NetworkInterface": { + "id": "NetworkInterface", + "type": "object", + "description": "A network interface resource attached to an instance.", + "properties": { + "accessConfigs": { + "type": "array", + "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "items": { + "$ref": "AccessConfig" + } + }, + "name": { + "type": "string", + "description": "Name of the network interface, determined by the server; for network devices, these are e.g. eth0, eth1, etc. (output only)." + }, + "network": { + "type": "string", + "description": "URL of the network resource attached to this interface.", + "annotations": { + "required": [ + "compute.instances.insert" + ] + } + }, + "networkIP": { + "type": "string", + "description": "An optional IPV4 internal network address assigned to the instance for this network interface (output only)." + } + } + }, + "NetworkList": { + "id": "NetworkList", + "type": "object", + "description": "Contains a list of network resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The network resources.", + "items": { + "$ref": "Network" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#networkList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "Operation": { + "id": "Operation", + "type": "object", + "description": "An operation resource, used to manage asynchronous API requests.", + "properties": { + "clientOperationId": { + "type": "string", + "description": "An optional identifier specified by the client when the mutation was initiated. Must be unique for all operation resources in the project (output only)." + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "endTime": { + "type": "string", + "description": "The time that this operation was completed. This is in RFC 3339 format (output only)." + }, + "error": { + "type": "object", + "description": "If errors occurred during processing of this operation, this field will be populated (output only).", + "properties": { + "errors": { + "type": "array", + "description": "The array of errors encountered while processing this operation.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The error type identifier for this error." + }, + "location": { + "type": "string", + "description": "Indicates the field in the request which caused the error. This property is optional." + }, + "message": { + "type": "string", + "description": "An optional, human-readable error message." + } + } + } + } + } + }, + "httpErrorMessage": { + "type": "string", + "description": "If operation fails, the HTTP error message returned, e.g. NOT FOUND. (output only)." + }, + "httpErrorStatusCode": { + "type": "integer", + "description": "If operation fails, the HTTP error status code returned, e.g. 404. (output only).", + "format": "int32" + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "insertTime": { + "type": "string", + "description": "The time that this operation was requested. This is in RFC 3339 format (output only)." + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#operation" + }, + "name": { + "type": "string", + "description": "Name of the resource (output only)." + }, + "operationType": { + "type": "string", + "description": "Type of the operation. Examples include \"insert\", \"update\", and \"delete\" (output only)." + }, + "progress": { + "type": "integer", + "description": "An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess at when the operation will be complete. This number should be monotonically increasing as the operation progresses (output only).", + "format": "int32" + }, + "region": { + "type": "string", + "description": "URL of the region where the operation resides (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "startTime": { + "type": "string", + "description": "The time that this operation was started by the server. This is in RFC 3339 format (output only)." + }, + "status": { + "type": "string", + "description": "Status of the operation. Can be one of the following: \"PENDING\", \"RUNNING\", or \"DONE\" (output only).", + "enum": [ + "DONE", + "PENDING", + "RUNNING" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "statusMessage": { + "type": "string", + "description": "An optional textual description of the current status of the operation (output only)." 
+ }, + "targetId": { + "type": "string", + "description": "Unique target id which identifies a particular incarnation of the target (output only).", + "format": "uint64" + }, + "targetLink": { + "type": "string", + "description": "URL of the resource the operation is mutating (output only)." + }, + "user": { + "type": "string", + "description": "User who requested the operation, for example \"user@example.com\" (output only)." + }, + "warnings": { + "type": "array", + "description": "If warning messages generated during processing of this operation, this field will be populated (output only).", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + }, + "zone": { + "type": "string", + "description": "URL of the zone where the operation resides (output only)." + } + } + }, + "OperationAggregatedList": { + "id": "OperationAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped operation lists.", + "additionalProperties": { + "$ref": "OperationsScopedList", + "description": "Name of the scope containing this set of operations." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#operationAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "OperationList": { + "id": "OperationList", + "type": "object", + "description": "Contains a list of operation resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The operation resources.", + "items": { + "$ref": "Operation" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#operationList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "OperationsScopedList": { + "id": "OperationsScopedList", + "type": "object", + "properties": { + "operations": { + "type": "array", + "description": "List of operations contained in this scope.", + "items": { + "$ref": "Operation" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of operations when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "Project": { + "id": "Project", + "type": "object", + "description": "A project resource. Projects can be created only in the APIs Console. Unless marked otherwise, values can only be modified in the console.", + "properties": { + "commonInstanceMetadata": { + "$ref": "Metadata", + "description": "Metadata key/value pairs available to all instances contained in this project." + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#project" + }, + "name": { + "type": "string", + "description": "Name of the resource." + }, + "quotas": { + "type": "array", + "description": "Quotas assigned to this project.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." 
+ } + } + }, + "Quota": { + "id": "Quota", + "type": "object", + "description": "A quotas entry.", + "properties": { + "limit": { + "type": "number", + "description": "Quota limit for this metric.", + "format": "double" + }, + "metric": { + "type": "string", + "description": "Name of the quota metric.", + "enum": [ + "CPUS", + "DISKS", + "DISKS_TOTAL_GB", + "EPHEMERAL_ADDRESSES", + "FIREWALLS", + "FORWARDING_RULES", + "HEALTH_CHECKS", + "IMAGES", + "IMAGES_TOTAL_GB", + "INSTANCES", + "IN_USE_ADDRESSES", + "KERNELS", + "KERNELS_TOTAL_GB", + "NETWORKS", + "OPERATIONS", + "ROUTES", + "SNAPSHOTS", + "STATIC_ADDRESSES", + "TARGET_INSTANCES", + "TARGET_POOLS" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "usage": { + "type": "number", + "description": "Current usage of this metric.", + "format": "double" + } + } + }, + "Region": { + "id": "Region", + "type": "object", + "description": "Region resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this region." + }, + "description": { + "type": "string", + "description": "Textual description of the resource." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#region" + }, + "name": { + "type": "string", + "description": "Name of the resource." + }, + "quotas": { + "type": "array", + "description": "Quotas assigned to this region.", + "items": { + "$ref": "Quota" + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "status": { + "type": "string", + "description": "Status of the region, \"UP\" or \"DOWN\".", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + }, + "zones": { + "type": "array", + "description": "A list of zones homed in this region, in the form of resource URLs.", + "items": { + "type": "string" + } + } + } + }, + "RegionList": { + "id": "RegionList", + "type": "object", + "description": "Contains a list of region resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The region resources.", + "items": { + "$ref": "Region" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#regionList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "Route": { + "id": "Route", + "type": "object", + "description": "The route resource. A Route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with VMs by tag and the set of Routes for a particular VM is called its routing table. For each packet leaving a VM, the system searches that VM's routing table for a single best matching Route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. 
If there is a tie, the system selects the Route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching Routes. The packet is then forwarded as specified by the next_hop field of the winning Route -- either to another VM destination, a VM gateway or a GCE operated gateway. Packets that do not match any Route in the sending VM's routing table will be dropped.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "destRange": { + "type": "string", + "description": "Which packets does this route apply to?", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#route" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "network": { + "type": "string", + "description": "URL of the network to which this route is applied; provided by the client when the route is created.", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "nextHopGateway": { + "type": "string", + "description": "The URL to a gateway that should handle matching packets." + }, + "nextHopInstance": { + "type": "string", + "description": "The URL to an instance that should handle matching packets." + }, + "nextHopIp": { + "type": "string", + "description": "The network IP address of an instance that should handle matching packets." + }, + "nextHopNetwork": { + "type": "string", + "description": "The URL of the local network if it should handle matching packets." + }, + "priority": { + "type": "integer", + "description": "Breaks ties between Routes of equal specificity. Routes with smaller values win when tied with routes with larger values.", + "format": "uint32", + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." 
+ }, + "tags": { + "type": "array", + "description": "A list of instance tags to which this route applies.", + "items": { + "type": "string" + }, + "annotations": { + "required": [ + "compute.routes.insert" + ] + } + }, + "warnings": { + "type": "array", + "description": "If potential misconfigurations are detected for this route, this field will be populated with warning messages.", + "items": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + } + }, + "RouteList": { + "id": "RouteList", + "type": "object", + "description": "Contains a list of route resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The route resources.", + "items": { + "$ref": "Route" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#routeList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "Scheduling": { + "id": "Scheduling", + "type": "object", + "description": "Scheduling options for an Instance.", + "properties": { + "automaticRestart": { + "type": "boolean", + "description": "Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user)." + }, + "onHostMaintenance": { + "type": "string", + "description": "How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SerialPortOutput": { + "id": "SerialPortOutput", + "type": "object", + "description": "An instance serial console output.", + "properties": { + "contents": { + "type": "string", + "description": "The contents of the console output." + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#serialPortOutput" + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A service account.", + "properties": { + "email": { + "type": "string", + "description": "Email address of the service account." 
+ }, + "scopes": { + "type": "array", + "description": "The list of scopes to be made available for this service account.", + "items": { + "type": "string" + } + } + } + }, + "Snapshot": { + "id": "Snapshot", + "type": "object", + "description": "A persistent disk snapshot resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "diskSizeGb": { + "type": "string", + "description": "Size of the persistent disk snapshot, specified in GB (output only).", + "format": "int64" + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#snapshot" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "sourceDisk": { + "type": "string", + "description": "The source disk used to create this snapshot. Once the source disk has been deleted from the system, this field will be cleared, and will not be set even if a disk with the same name has been re-created (output only)." + }, + "sourceDiskId": { + "type": "string", + "description": "The 'id' value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + }, + "status": { + "type": "string", + "description": "The status of the persistent disk snapshot (output only).", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "storageBytes": { + "type": "string", + "description": "A size of the the storage used by the snapshot. As snapshots share storage this number is expected to change with snapshot creation/deletion.", + "format": "int64" + }, + "storageBytesStatus": { + "type": "string", + "description": "An indicator whether storageBytes is in a stable state, or it is being adjusted as a result of shared storage reallocation.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SnapshotList": { + "id": "SnapshotList", + "type": "object", + "description": "Contains a list of persistent disk snapshot resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The persistent snapshot resources.", + "items": { + "$ref": "Snapshot" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#snapshotList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." 
+ } + } + }, + "Tags": { + "id": "Tags", + "type": "object", + "description": "A set of instance tags.", + "properties": { + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the tags stored in this object. This field is used optimistic locking. An up-to-date tags fingerprint must be provided in order to modify tags.", + "format": "byte" + }, + "items": { + "type": "array", + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "items": { + "type": "string" + } + } + } + }, + "TargetPool": { + "id": "TargetPool", + "type": "object", + "description": "A TargetPool resource. This resource defines a pool of VMs, associated HttpHealthCheck resources, and the fallback TargetPool.", + "properties": { + "backupPool": { + "type": "string", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its 'failoverRatio' field is properly set to a value between [0, 1].\n\n'backupPool' and 'failoverRatio' together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below 'failoverRatio', traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' and 'backupPool' are not set, or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy." + }, + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "description": { + "type": "string", + "description": "An optional textual description of the resource; provided by the client when the resource is created." + }, + "failoverRatio": { + "type": "number", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, 'backupPool' must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.", + "format": "float" + }, + "healthChecks": { + "type": "array", + "description": "A list of URLs to the HttpHealthCheck resource. A member VM in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member VMs will be considered healthy at all times.", + "items": { + "type": "string" + } + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "instances": { + "type": "array", + "description": "A list of resource URLs to the member VMs serving this pool. 
They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#targetPool" + }, + "name": { + "type": "string", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "region": { + "type": "string", + "description": "URL of the region where the target pool resides (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "sessionAffinity": { + "type": "string", + "description": "Session affinity option, must be one of the following values: 'NONE': Connections from the same client IP may go to any VM in the pool; 'CLIENT_IP': Connections from the same client IP will go to the same VM in the pool while that VM remains healthy; 'CLIENT_IP_PROTO': Connections from the same client IP with the same IP protocol will go to the same VM in the pool while that VM remains healthy.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PROTO", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "" + ] + } + } + }, + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "object", + "description": "A map of scoped target pool lists.", + "additionalProperties": { + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + }, + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", + "type": "object", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolInstanceHealth" + } + } + }, + "TargetPoolList": { + "id": "TargetPoolList", + "type": "object", + "description": "Contains a list of TargetPool resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The TargetPool resources.", + "items": { + "$ref": "TargetPool" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetPoolList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)."
+ } + } + }, + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URLs to be added to targetPool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be added to targetPool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URLs to be removed from targetPool.", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be removed from targetPool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "type": "object", + "properties": { + "targetPools": { + "type": "array", + "description": "List of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "The warning type identifier for this warning.", + "enum": [ + "DEPRECATED_RESOURCE_USED", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "Metadata for this warning in 'key: value' format.", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "A key for the warning data." + }, + "value": { + "type": "string", + "description": "A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "Optional human-readable details for this warning." + } + } + } + } + }, + "TargetReference": { + "id": "TargetReference", + "type": "object", + "properties": { + "target": { + "type": "string" + } + } + }, + "Zone": { + "id": "Zone", + "type": "object", + "description": "A zone resource.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "Creation timestamp in RFC3339 text format (output only)." + }, + "deprecated": { + "$ref": "DeprecationStatus", + "description": "The deprecation status associated with this zone." + }, + "description": { + "type": "string", + "description": "Textual description of the resource." + }, + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only).", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "Type of the resource.", + "default": "compute#zone" + }, + "maintenanceWindows": { + "type": "array", + "description": "Scheduled maintenance windows for the zone. 
When the zone is in a maintenance window, all resources which reside in the zone will be unavailable.", + "items": { + "type": "object", + "properties": { + "beginTime": { + "type": "string", + "description": "Begin time of the maintenance window, in RFC 3339 format." + }, + "description": { + "type": "string", + "description": "Textual description of the maintenance window." + }, + "endTime": { + "type": "string", + "description": "End time of the maintenance window, in RFC 3339 format." + }, + "name": { + "type": "string", + "description": "Name of the maintenance window." + } + } + } + }, + "name": { + "type": "string", + "description": "Name of the resource." + }, + "region": { + "type": "string", + "description": "Full URL reference to the region which hosts the zone (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for the resource (output only)." + }, + "status": { + "type": "string", + "description": "Status of the zone. \"UP\" or \"DOWN\".", + "enum": [ + "DOWN", + "UP" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "ZoneList": { + "id": "ZoneList", + "type": "object", + "description": "Contains a list of zone resources.", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the resource; defined by the server (output only)." + }, + "items": { + "type": "array", + "description": "The zone resources.", + "items": { + "$ref": "Zone" + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#zoneList" + }, + "nextPageToken": { + "type": "string", + "description": "A token used to continue a truncated list request (output only)." + }, + "selfLink": { + "type": "string", + "description": "Server defined URL for this resource (output only)." + } + } + } + }, + "resources": { + "addresses": { + "methods": { + "aggregatedList": { + "id": "compute.addresses.aggregatedList", + "path": "{project}/aggregated/addresses", + "httpMethod": "GET", + "description": "Retrieves the list of addresses grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "AddressAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.addresses.delete", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "DELETE", + "description": "Deletes the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.addresses.get", + "path": "{project}/regions/{region}/addresses/{address}", + "httpMethod": "GET", + "description": "Returns the specified address resource.", + "parameters": { + "address": { + "type": "string", + "description": "Name of the address resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "address" + ], + "response": { + "$ref": "Address" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.addresses.insert", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "POST", + "description": "Creates an address resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Address" + }, + "response": { 
+ "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.addresses.list", + "path": "{project}/regions/{region}/addresses", + "httpMethod": "GET", + "description": "Retrieves the list of address resources contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "AddressList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "disks": { + "methods": { + "aggregatedList": { + "id": "compute.disks.aggregatedList", + "path": "{project}/aggregated/disks", + "httpMethod": "GET", + "description": "Retrieves the list of disks grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "DiskAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "createSnapshot": { + "id": "compute.disks.createSnapshot", + "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot", + "httpMethod": "POST", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk resource to snapshot.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "request": { + "$ref": "Snapshot" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.disks.delete", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "DELETE", + "description": "Deletes the specified persistent disk resource.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.disks.get", + "path": "{project}/zones/{zone}/disks/{disk}", + "httpMethod": "GET", + "description": "Returns the specified persistent disk resource.", + "parameters": { + "disk": { + "type": "string", + "description": "Name of the persistent disk resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + 
"zone", + "disk" + ], + "response": { + "$ref": "Disk" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.disks.insert", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "POST", + "description": "Creates a persistent disk resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "sourceImage": { + "type": "string", + "description": "Optional. Source image to restore onto a disk.", + "location": "query" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.disks.list", + "path": "{project}/zones/{zone}/disks", + "httpMethod": "GET", + "description": "Retrieves the list of persistent disk resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "DiskList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "firewalls": { + "methods": { + "delete": { + "id": "compute.firewalls.delete", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "DELETE", + "description": "Deletes the specified firewall resource.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.firewalls.get", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "GET", + "description": "Returns the specified firewall resource.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "response": { + "$ref": "Firewall" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.firewalls.insert", + "path": "{project}/global/firewalls", + "httpMethod": "POST", + "description": "Creates a firewall resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.firewalls.list", + "path": "{project}/global/firewalls", + "httpMethod": "GET", + "description": "Retrieves the list of firewall resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. 
Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "FirewallList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.firewalls.patch", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PATCH", + "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.firewalls.update", + "path": "{project}/global/firewalls/{firewall}", + "httpMethod": "PUT", + "description": "Updates the specified firewall resource with the data included in the request.", + "parameters": { + "firewall": { + "type": "string", + "description": "Name of the firewall resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "firewall" + ], + "request": { + "$ref": "Firewall" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "forwardingRules": { + "methods": { + "aggregatedList": { + "id": "compute.forwardingRules.aggregatedList", + "path": "{project}/aggregated/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves the list of forwarding rules grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. 
Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ForwardingRuleAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.forwardingRules.delete", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "DELETE", + "description": "Deletes the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.forwardingRules.get", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}", + "httpMethod": "GET", + "description": "Returns the specified ForwardingRule resource.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "response": { + "$ref": "ForwardingRule" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.forwardingRules.insert", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "POST", + "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "ForwardingRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.forwardingRules.list", + "path": "{project}/regions/{region}/forwardingRules", + "httpMethod": "GET", + "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "ForwardingRuleList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setTarget": { + "id": "compute.forwardingRules.setTarget", + "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget", + "httpMethod": "POST", + "description": "Changes target url for forwarding rule.", + "parameters": { + "forwardingRule": { + "type": "string", + "description": "Name of the ForwardingRule resource in which target is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "forwardingRule" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "globalOperations": { + "methods": { + "aggregatedList": { + "id": "compute.globalOperations.aggregatedList", + "path": "{project}/aggregated/operations", + "httpMethod": "GET", + "description": "Retrieves the list of all operations grouped by 
scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.globalOperations.delete", + "path": "{project}/global/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.globalOperations.get", + "path": "{project}/global/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.globalOperations.list", + "path": "{project}/global/operations", + "httpMethod": "GET", + "description": "Retrieves the list of operation resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. 
Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "httpHealthChecks": { + "methods": { + "delete": { + "id": "compute.httpHealthChecks.delete", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "DELETE", + "description": "Deletes the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.httpHealthChecks.get", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "GET", + "description": "Returns the specified HttpHealthCheck resource.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "response": { + "$ref": "HttpHealthCheck" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.httpHealthChecks.insert", + "path": "{project}/global/httpHealthChecks", + "httpMethod": "POST", + "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.httpHealthChecks.list", + "path": 
"{project}/global/httpHealthChecks", + "httpMethod": "GET", + "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "HttpHealthCheckList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.httpHealthChecks.patch", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PATCH", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.httpHealthChecks.update", + "path": "{project}/global/httpHealthChecks/{httpHealthCheck}", + "httpMethod": "PUT", + "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.", + "parameters": { + "httpHealthCheck": { + "type": "string", + "description": "Name of the HttpHealthCheck resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "httpHealthCheck" + ], + "request": { + "$ref": "HttpHealthCheck" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "images": { + "methods": { + "delete": { + "id": "compute.images.delete", + "path": "{project}/global/images/{image}", + "httpMethod": "DELETE", + "description": "Deletes the specified image resource.", + "parameters": { + "image": { + 
"type": "string", + "description": "Name of the image resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "deprecate": { + "id": "compute.images.deprecate", + "path": "{project}/global/images/{image}/deprecate", + "httpMethod": "POST", + "description": "Sets the deprecation status of an image. If no message body is given, clears the deprecation status instead.", + "parameters": { + "image": { + "type": "string", + "description": "Image name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "request": { + "$ref": "DeprecationStatus" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.images.get", + "path": "{project}/global/images/{image}", + "httpMethod": "GET", + "description": "Returns the specified image resource.", + "parameters": { + "image": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "image" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.images.insert", + "path": "{project}/global/images", + "httpMethod": "POST", + "description": "Creates an image resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Image" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "id": "compute.images.list", + "path": "{project}/global/images", + "httpMethod": "GET", + "description": "Retrieves the list of image resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + 
"description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ImageList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "instances": { + "methods": { + "addAccessConfig": { + "id": "compute.instances.addAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig", + "httpMethod": "POST", + "description": "Adds an access config to an instance's network interface.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "Network interface name.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project name.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "networkInterface" + ], + "request": { + "$ref": "AccessConfig" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.instances.aggregatedList", + "path": "{project}/aggregated/instances", + "httpMethod": "GET", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "InstanceAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "attachDisk": { + "id": "compute.instances.attachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/attachDisk", + "httpMethod": "POST", + "description": "Attaches a disk resource to an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project name.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "AttachedDisk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "delete": { + "id": "compute.instances.delete", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "DELETE", + "description": "Deletes the specified instance resource.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "deleteAccessConfig": { + "id": "compute.instances.deleteAccessConfig", + "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig", + "httpMethod": "POST", + "description": "Deletes an access config from an instance's network interface.", + "parameters": { + "accessConfig": { + "type": "string", + "description": "Access config name.", + "required": true, + "location": "query" + }, + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "networkInterface": { + "type": "string", + "description": "Network interface name.", + "required": true, + "location": "query" + }, + "project": { + "type": "string", + "description": "Project name.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "accessConfig", + "networkInterface" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "detachDisk": { + "id": "compute.instances.detachDisk", + "path": "{project}/zones/{zone}/instances/{instance}/detachDisk", + "httpMethod": "POST", + "description": "Detaches a disk from an instance.", + "parameters": { + "deviceName": { + "type": "string", + "description": "Disk device name to detach.", + "required": true, + "pattern": "\\w[\\w.-]{0,254}", + "location": "query" + }, + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project name.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance", + "deviceName" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.instances.get", + "path": "{project}/zones/{zone}/instances/{instance}", + "httpMethod": "GET", + "description": "Returns the specified instance resource.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Instance" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getSerialPortOutput": { + "id": "compute.instances.getSerialPortOutput", + "path": "{project}/zones/{zone}/instances/{instance}/serialPort", + "httpMethod": "GET", + "description": "Returns the specified instance's serial port output.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": 
"path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "SerialPortOutput" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.instances.insert", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "POST", + "description": "Creates an instance resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "request": { + "$ref": "Instance" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.instances.list", + "path": "{project}/zones/{zone}/instances", + "httpMethod": "GET", + "description": "Retrieves the list of instance resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "InstanceList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "reset": { + "id": "compute.instances.reset", + "path": "{project}/zones/{zone}/instances/{instance}/reset", + "httpMethod": "POST", + "description": "Performs a hard reset on the instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "setMetadata": { + "id": "compute.instances.setMetadata", + "path": "{project}/zones/{zone}/instances/{instance}/setMetadata", + "httpMethod": "POST", + "description": "Sets metadata for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "setScheduling": { + "id": "compute.instances.setScheduling", + "path": "{project}/zones/{zone}/instances/{instance}/setScheduling", + "httpMethod": "POST", + "description": "Sets an instance's scheduling options.", + "parameters": { + "instance": { + "type": "string", + "description": "Instance name.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project name.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Scheduling" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "setTags": { + "id": "compute.instances.setTags", + "path": "{project}/zones/{zone}/instances/{instance}/setTags", + "httpMethod": "POST", + "description": "Sets tags for the specified instance to the data included in the request.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "Tags" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "id": "compute.machineTypes.aggregatedList", + "path": "{project}/aggregated/machineTypes", + "httpMethod": "GET", + "description": "Retrieves the list of machine type resources grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MachineTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.machineTypes.get", + "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "httpMethod": "GET", + "description": "Returns the specified machine type resource.", + "parameters": { + "machineType": { + "type": "string", + "description": "Name of the machine type resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "machineType" + ], + "response": { + "$ref": "MachineType" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.machineTypes.list", + "path": "{project}/zones/{zone}/machineTypes", + "httpMethod": "GET", + "description": "Retrieves the list of machine type resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "MachineTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networks": { + "methods": { + "delete": { + "id": "compute.networks.delete", + "path": "{project}/global/networks/{network}", + "httpMethod": "DELETE", + "description": "Deletes the specified network resource.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.networks.get", + "path": "{project}/global/networks/{network}", + "httpMethod": "GET", + "description": "Returns the specified network resource.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.networks.insert", + "path": "{project}/global/networks", + "httpMethod": "POST", + "description": "Creates a network resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networks.list", + "path": "{project}/global/networks", + "httpMethod": "GET", + "description": "Retrieves the list of network resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. 
Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "compute.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Returns the specified project resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project resource to retrieve.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "setCommonInstanceMetadata": { + "id": "compute.projects.setCommonInstanceMetadata", + "path": "{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionOperations": { + "methods": { + "delete": { + "id": "compute.regionOperations.delete", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified region-specific operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "scopes": [ + 
"https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.regionOperations.get", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified region-specific operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.regionOperations.list", + "path": "{project}/regions/{region}/operations", + "httpMethod": "GET", + "description": "Retrieves the list of operation resources contained within the specified region.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regions": { + "methods": { + "get": { + "id": "compute.regions.get", + "path": "{project}/regions/{region}", + "httpMethod": "GET", + "description": "Returns the specified region resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "Region" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.regions.list", + "path": "{project}/regions", + "httpMethod": "GET", + "description": "Retrieves the list of region resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RegionList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "routes": { + "methods": { + "delete": { + "id": "compute.routes.delete", + "path": "{project}/global/routes/{route}", + "httpMethod": "DELETE", + "description": "Deletes the specified route resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the route resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.routes.get", + "path": "{project}/global/routes/{route}", + "httpMethod": "GET", + "description": "Returns the specified route resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "route": { + "type": "string", + "description": "Name of the route resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "route" + ], + "response": { + "$ref": "Route" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.routes.insert", + "path": "{project}/global/routes", + "httpMethod": "POST", + "description": "Creates a route resource in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Route" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.routes.list", + "path": "{project}/global/routes", + "httpMethod": "GET", + "description": "Retrieves the list of route resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. 
Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RouteList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "snapshots": { + "methods": { + "delete": { + "id": "compute.snapshots.delete", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "DELETE", + "description": "Deletes the specified persistent disk snapshot resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the persistent disk snapshot resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.snapshots.get", + "path": "{project}/global/snapshots/{snapshot}", + "httpMethod": "GET", + "description": "Returns the specified persistent disk snapshot resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "snapshot": { + "type": "string", + "description": "Name of the persistent disk snapshot resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "snapshot" + ], + "response": { + "$ref": "Snapshot" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.snapshots.list", + "path": "{project}/global/snapshots", + "httpMethod": "GET", + "description": "Retrieves the list of persistent disk snapshot resources contained within the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "SnapshotList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "targetPools": { + "methods": { + "addHealthCheck": { + "id": "compute.targetPools.addHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck", + "httpMethod": "POST", + "description": "Adds health check URL to targetPool.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which health_check_url is to be added.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "addInstance": { + "id": "compute.targetPools.addInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance", + "httpMethod": "POST", + "description": "Adds instance url to targetPool.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which instance_url is to be added.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsAddInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "aggregatedList": { + "id": "compute.targetPools.aggregatedList", + "path": "{project}/aggregated/targetPools", + "httpMethod": "GET", + "description": "Retrieves the list of target pools grouped by scope.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. 
Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "TargetPoolAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.targetPools.delete", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "DELETE", + "description": "Deletes the specified TargetPool resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.targetPools.get", + "path": "{project}/regions/{region}/targetPools/{targetPool}", + "httpMethod": "GET", + "description": "Returns the specified TargetPool resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "response": { + "$ref": "TargetPool" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getHealth": { + "id": "compute.targetPools.getHealth", + "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth", + "httpMethod": "POST", + "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which the queried instance belongs.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "InstanceReference" + }, + "response": { + "$ref": "TargetPoolInstanceHealth" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.targetPools.insert", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "POST", + "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "TargetPool" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.targetPools.list", + "path": "{project}/regions/{region}/targetPools", + "httpMethod": "GET", + "description": "Retrieves the list of TargetPool resources available to the specified project and region.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "TargetPoolList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "removeHealthCheck": { + "id": "compute.targetPools.removeHealthCheck", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck", + "httpMethod": "POST", + "description": "Removes health check URL from targetPool.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which health_check_url is to be removed.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveHealthCheckRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "removeInstance": { + "id": "compute.targetPools.removeInstance", + "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance", + "httpMethod": "POST", + "description": "Removes instance URL from targetPool.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource to which instance_url is to be removed.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetPoolsRemoveInstanceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "setBackup": { + "id": "compute.targetPools.setBackup", + "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup", + "httpMethod": "POST", + "description": "Changes backup pool configurations.", + "parameters": { + "failoverRatio": { + "type": "number", + "description": "New failoverRatio value for the containing target pool.", + "format": "float", + "location": 
"query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "targetPool": { + "type": "string", + "description": "Name of the TargetPool resource for which the backup is to be set.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "targetPool" + ], + "request": { + "$ref": "TargetReference" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "zoneOperations": { + "methods": { + "delete": { + "id": "compute.zoneOperations.delete", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified zone-specific operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "scopes": [ + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.zoneOperations.get", + "path": "{project}/zones/{zone}/operations/{operation}", + "httpMethod": "GET", + "description": "Retrieves the specified zone-specific operation resource.", + "parameters": { + "operation": { + "type": "string", + "description": "Name of the operation resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "operation" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zoneOperations.list", + "path": "{project}/zones/{zone}/operations", + "httpMethod": "GET", + "description": "Retrieves the list of operation resources contained within the specified zone.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. 
Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "OperationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "zones": { + "methods": { + "get": { + "id": "compute.zones.get", + "path": "{project}/zones/{zone}", + "httpMethod": "GET", + "description": "Returns the specified zone resource.", + "parameters": { + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "Name of the zone resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "Zone" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.zones.list", + "path": "{project}/zones", + "httpMethod": "GET", + "description": "Retrieves the list of zone resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Optional. Filter expression for filtering listed resources.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 100.", + "default": "100", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Optional. Tag returned by a previous list request truncated by maxResults. 
Used to continue a previous list request.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Name of the project scoping this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + } + } +} diff --git a/gceapi/api/discovery.py b/gceapi/api/discovery.py new file mode 100644 index 0000000..bbc455f --- /dev/null +++ b/gceapi/api/discovery.py @@ -0,0 +1,63 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +import os +import threading +import webob + +from gceapi.openstack.common import log as logging +from gceapi import wsgi_ext as openstack_wsgi + +LOG = logging.getLogger(__name__) + + +class Controller(object): + + _lock = threading.RLock() + _files = {} + + def discovery(self, req, version): + """Returns appropriate json by its version.""" + + if version in self._files: + return self._files[version] + + self._lock.acquire() + try: + if version in self._files: + return self._files[version] + + jfile = self._load_file(version) + jfile = jfile.replace("{HOST_URL}", req.host_url) + self._files[version] = jfile + return jfile + finally: + self._lock.release() + + def _load_file(self, version): + current_file = os.path.abspath(inspect.getsourcefile(lambda _: None)) + current_dir = os.path.dirname(current_file) + file_name = os.path.join(current_dir, "compute", version + ".json") + try: + f = open(file_name) + except Exception as ex: + raise webob.exc.HTTPNotFound(ex) + result = f.read() + f.close() + return result + + +def create_resource(): + return openstack_wsgi.Resource(Controller()) diff --git a/gceapi/api/disk_api.py b/gceapi/api/disk_api.py new file mode 100644 index 0000000..41a42bb --- /dev/null +++ b/gceapi/api/disk_api.py @@ -0,0 +1,161 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
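Editor's note on the discovery controller above: it lazily loads each compute/<version>.json once and caches it behind a double-checked RLock, so concurrent WSGI workers read the file at most once. Note that the cached copy bakes in the req.host_url of the first request for a given version. A minimal standalone sketch of the same lazy-load pattern (names here are illustrative, not part of the patch):

import threading

_cache = {}
_lock = threading.RLock()

def get_discovery_doc(version, load_file):
    # Fast path: no lock taken when the document is already cached.
    doc = _cache.get(version)
    if doc is not None:
        return doc
    with _lock:
        # Re-check under the lock: another thread may have loaded it
        # between our first check and acquiring the lock.
        doc = _cache.get(version)
        if doc is None:
            doc = load_file(version)  # e.g. reads compute/<version>.json
            _cache[version] = doc
        return doc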
+ +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import image_api +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import scopes +from gceapi.api import utils +from gceapi import exception + + +GB = 1024 ** 3 + + +class API(base_api.API): + """GCE Disk API.""" + + KIND = "disk" + _status_map = { + "creating": "CREATING", + "downloading": "CREATING", + "available": "READY", + "attaching": "READY", + "in-use": "READY", + # "deleting": "", + "error": "FAILED", + # "error_deleting": "", + "backing-up": "READY", + "restoring-backup": "READY", + # "error_restoring": "" + } + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + operation_api.API().register_get_progress_method( + "disk-add", + self._get_add_item_progress) + operation_api.API().register_get_progress_method( + "disk-delete", + self._get_delete_item_progress) + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope=None): + client = clients.cinder(context) + volumes = client.volumes.list(search_opts={"display_name": name}) + volumes = self._filter_volumes_by_zone(volumes, scope) + volumes = [utils.to_dict(item) for item in volumes] + if not volumes or len(volumes) != 1: + raise exception.NotFound + return self._prepare_item(client, volumes[0]) + + def get_items(self, context, scope=None): + client = clients.cinder(context) + volumes = client.volumes.list() + volumes = self._filter_volumes_by_zone(volumes, scope) + volumes = [utils.to_dict(item) for item in volumes] + for volume in volumes: + self._prepare_item(client, volume) + return volumes + + def get_scopes(self, context, item): + return [scopes.ZoneScope(item["availability_zone"])] + + def _prepare_item(self, client, item): + snapshot = None + snapshot_id = item["snapshot_id"] + if snapshot_id: + snapshot = utils.to_dict(client.volume_snapshots.get(snapshot_id)) + item["snapshot"] = snapshot + item["status"] = self._status_map.get(item["status"], item["status"]) + item["name"] = item["display_name"] + image = item.get("volume_image_metadata") + if image: + item["image_name"] = image["image_name"] + return item + + def _filter_volumes_by_zone(self, volumes, scope): + if scope is None: + return volumes + return filter( + lambda volume: volume.availability_zone == scope.get_name(), + volumes) + + def delete_item(self, context, name, scope=None): + client = clients.cinder(context).volumes + volumes = client.list(search_opts={"display_name": name}) + if not volumes or len(volumes) != 1: + raise exception.NotFound + operation_util.start_operation(context, + self._get_delete_item_progress, + volumes[0].id) + client.delete(volumes[0]) + + def add_item(self, context, name, body, scope=None): + sizeGb = int(body['sizeGb']) if 'sizeGb' in body else None + + snapshot_uri = body.get("sourceSnapshot") + image_uri = body.get("sourceImage") + snapshot_id = None + image_id = None + + client = clients.cinder(context) + if snapshot_uri: + snapshot_name = utils._extract_name_from_url(snapshot_uri) + snapshots = client.volume_snapshots.list( + search_opts={"display_name": snapshot_name}) + if not snapshots or len(snapshots) != 1: + raise exception.NotFound + snapshot_id = snapshots[0].id + elif image_uri: + image_name = utils._extract_name_from_url(image_uri) + image = image_api.API().get_item(context, image_name, scope) + image_id = image['id'] + # Cinder API doesn't get size from image, so we do this + image_size_in_gb = (int(image['size']) + GB - 1) / GB 
+ if not sizeGb or sizeGb < image_size_in_gb: + sizeGb = image_size_in_gb + + operation_util.start_operation(context, self._get_add_item_progress) + volume = client.volumes.create( + sizeGb, snapshot_id=snapshot_id, + display_name=body.get('name'), + display_description=body.get('description'), + imageRef=image_id, + availability_zone=scope.get_name()) + operation_util.set_item_id(context, volume.id) + + return self._prepare_item(client, utils.to_dict(volume)) + + def _get_add_item_progress(self, context, volume_id): + client = clients.cinder(context) + try: + volume = client.volumes.get(volume_id) + except clients.cinderclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if (volume.status not in ["creating", "downloading"]): + return operation_api.gef_final_progress(volume.status == "error") + + def _get_delete_item_progress(self, context, volume_id): + client = clients.cinder(context) + try: + volume = client.volumes.get(volume_id) + except clients.cinderclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if volume.status not in ["deleting", "deleted"]: + return operation_api.gef_final_progress(True) diff --git a/gceapi/api/disks.py b/gceapi/api/disks.py new file mode 100644 index 0000000..637f3d5 --- /dev/null +++ b/gceapi/api/disks.py @@ -0,0 +1,69 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
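Editor's note on add_item in disk_api.py above: the volume size is derived from the image with (int(image['size']) + GB - 1) / GB, which is a ceiling division; under Python 2, which this codebase targets, / on two ints floors, so adding GB - 1 first rounds any partial gigabyte up. A quick hedged check of that arithmetic (written with // so it also holds on Python 3):

GB = 1024 ** 3

def ceil_gb(size_bytes):
    # Round a byte count up to the next whole GiB.
    return (int(size_bytes) + GB - 1) // GB

assert ceil_gb(1) == 1        # a 1-byte image still needs a 1 GB volume
assert ceil_gb(GB) == 1       # exactly 1 GiB does not round up
assert ceil_gb(GB + 1) == 2   # one byte over needs 2 GB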
+ +from gceapi.api import common as gce_common +from gceapi.api import disk_api +from gceapi.api import operation_util +from gceapi.api import scopes +from gceapi.api import snapshot_api +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Disk controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(disk_api.API(), + *args, **kwargs) + + def format_item(self, request, volume, scope): + result_dict = { + "creationTimestamp": self._format_date(volume["created_at"]), + "status": volume["status"], + "name": volume["display_name"], + "description": volume["display_description"], + "sizeGb": volume["size"], + } + snapshot = volume["snapshot"] + if snapshot: + result_dict["sourceSnapshot"] = self._qualify(request, + "snapshots", snapshot["display_name"], + scopes.GlobalScope()) + result_dict["sourceSnapshotId"] = snapshot["id"] + image_name = volume.get("image_name") + if image_name: + result_dict["sourceImage"] = self._qualify(request, + "images", image_name, scopes.GlobalScope()) + result_dict["sourceImageId"] = self._get_id( + result_dict["sourceImage"]) + + return self._format_item(request, result_dict, scope) + + def create(self, req, body, scope_id): + source_image = req.params.get("sourceImage") + if source_image is not None: + body["sourceImage"] = source_image + return super(Controller, self).create(req, body, scope_id) + + def create_snapshot(self, req, body, scope_id, id): + body["disk_name"] = id + scope = self._get_scope(req, scope_id) + context = self._get_context(req) + operation_util.init_operation(context, "createSnapshot", + self._type_name, id, scope) + snapshot_api.API().add_item(context, body, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/firewall_api.py b/gceapi/api/firewall_api.py new file mode 100644 index 0000000..4480b95 --- /dev/null +++ b/gceapi/api/firewall_api.py @@ -0,0 +1,256 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
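For orientation, this is roughly what format_item in the disks controller above assembles for a snapshot-backed volume, before the common _format_item adds the usual kind/id/selfLink envelope; every value below is invented for illustration:

disk_resource = {
    "creationTimestamp": "2014-02-01T12:00:00Z",
    "status": "READY",
    "name": "my-disk",
    "description": "",
    "sizeGb": 10,
    # Added only when the volume was created from a snapshot:
    "sourceSnapshot": "http://gce-api-host/compute/v1/projects/myproj"
                      "/global/snapshots/my-snapshot",
    "sourceSnapshotId": "b2afc3c0-...",
}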
+ +import copy + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import network_api +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + + +PROTOCOL_MAP = { + '1': 'icmp', + '6': 'tcp', + '17': 'udp', +} +LOG = logging.getLogger(__name__) + + +class API(base_api.API): + """GCE Firewall API.""" + + KIND = "firewall" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "network_name"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + network_api.API()._register_callback( + base_api._callback_reasons.pre_delete, + self.delete_network_firewalls) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + client = clients.nova(context) + try: + firewall = client.security_groups.find(name=name) + except (clients.novaclient.exceptions.NotFound, + clients.novaclient.exceptions.NoUniqueMatch): + raise exception.NotFound() + firewall = self._prepare_firewall(utils.to_dict(firewall)) + db_firewall = self._get_db_item_by_id(context, firewall["id"]) + self._prepare_item(firewall, db_firewall) + return firewall + + def get_items(self, context, scope=None): + client = clients.nova(context) + firewalls = client.security_groups.list() + items = list() + gce_firewalls = self._get_db_items_dict(context) + for firewall in firewalls: + item = self._prepare_firewall(utils.to_dict(firewall)) + self._prepare_item(item, gce_firewalls.get(item["id"])) + items.append(item) + self._purge_db(context, items, gce_firewalls) + return items + + def add_item(self, context, name, body, scope=None): + network = self._get_network_by_url(context, body['network']) + self._check_rules(body) + group_description = body.get("description", "") + client = clients.nova(context) + operation_util.start_operation(context) + sg = client.security_groups.create(body['name'], group_description) + try: + rules = self._convert_to_secgroup_rules(body) + for rule in rules: + client.security_group_rules.create( + sg.id, ip_protocol=rule["protocol"], + from_port=rule["from_port"], to_port=rule["to_port"], + cidr=rule["cidr"], ) + except Exception: + client.security_groups.delete(sg) + raise + new_firewall = utils.to_dict(client.security_groups.get(sg.id)) + new_firewall = self._prepare_firewall(new_firewall) + new_firewall["creationTimestamp"] = 1 + new_firewall["network_name"] = network["name"] + new_firewall = self._add_db_item(context, new_firewall) + self._process_callbacks( + context, base_api._callback_reasons.post_add, new_firewall) + return new_firewall + + def delete_item(self, context, name, scope=None): + firewall = self.get_item(context, name) + operation_util.start_operation(context) + self._process_callbacks( + context, base_api._callback_reasons.pre_delete, firewall) + client = clients.nova(context) + try: + client.security_groups.delete(firewall["id"]) + self._delete_db_item(context, firewall) + except clients.novaclient.exceptions.ClientException as ex: + raise exception.GceapiException(message=ex.message, code=ex.code) + + def _prepare_firewall(self, firewall): + # NOTE(ft): OpenStack security groups are more powerful than + # gce firewalls so when we cannot completely convert secgroup + # we add prefixes to firewall description + # [*] - cidr rules too complex to convert + # [+] - non-cidr rules 
present
+
+        non_cidr_rule_exists = False
+        too_complex_for_gce = False
+
+        # NOTE(ft): group OpenStack rules by cidr and proto
+        # cidr group must be comparable object
+        def _ports_to_str(rule):
+            if rule['from_port'] == rule['to_port']:
+                return str(rule['from_port'])
+            else:
+                return "%s-%s" % (rule['from_port'], rule['to_port'])
+
+        grouped_rules = {}
+        for rule in firewall["rules"]:
+            if "cidr" not in rule["ip_range"] or not rule["ip_range"]["cidr"]:
+                non_cidr_rule_exists = True
+                continue
+            cidr = rule.get("ip_range", {}).get("cidr")
+            proto = rule["ip_protocol"]
+            cidr_group = grouped_rules.setdefault(cidr, {})
+            proto_ports = cidr_group.setdefault(proto, set())
+            proto_ports.add(_ports_to_str(rule))
+
+        # NOTE(ft): compare cidr groups to understand
+        # whether OpenStack rules are too complex or not
+        common_rules = None
+        for cidr in grouped_rules:
+            if common_rules:
+                if common_rules != grouped_rules[cidr]:
+                    too_complex_for_gce = True
+                    break
+            else:
+                common_rules = grouped_rules[cidr]
+
+        # NOTE(ft): check icmp rules:
+        # if a per-type icmp rule is present then rules are too complex
+        if not too_complex_for_gce and common_rules and "icmp" in common_rules:
+            icmp_rules = common_rules["icmp"]
+            if len(icmp_rules) == 1:
+                icmp_rule = icmp_rules.pop()
+                if icmp_rule != "-1":
+                    too_complex_for_gce = True
+            else:
+                too_complex_for_gce = True
+
+        # NOTE(ft): build gce rules if possible
+        def _build_gce_port_rule(proto, rules):
+            gce_rule = {"IPProtocol": proto}
+            if proto != "icmp":
+                gce_rule["ports"] = rules
+            return gce_rule
+
+        sourceRanges = []
+        allowed = []
+        if not too_complex_for_gce:
+            sourceRanges = [cidr for cidr in grouped_rules] or ["0.0.0.0/0"]
+            if common_rules:
+                allowed = [_build_gce_port_rule(proto, common_rules[proto])
+                           for proto in common_rules]
+        firewall["sourceRanges"] = sourceRanges
+        firewall["allowed"] = allowed
+
+        # NOTE(ft): add prefixes to description
+        description = firewall.get("description")
+        prefixes = []
+        if too_complex_for_gce:
+            prefixes.append("[*]")
+        if non_cidr_rule_exists:
+            prefixes.append("[+]")
+        if prefixes:
+            if description is not None:
+                prefixes.append(description)
+            description = "".join(prefixes)
+            firewall["description"] = description
+
+        return firewall
+
+    def _get_network_by_url(self, context, url):
+        # NOTE(apavlov): Check existence of such network
+        network_name = utils._extract_name_from_url(url)
+        return network_api.API().get_item(context, network_name)
+
+    def _check_rules(self, firewall):
+        if not firewall.get('sourceRanges') or firewall.get('sourceTags'):
+            msg = _("'sourceRanges' must be provided; "
+                    "'sourceTags' are not supported")
+            raise exception.InvalidRequest(msg)
+        for allowed in firewall.get('allowed', []):
+            proto = allowed.get('IPProtocol')
+            proto = PROTOCOL_MAP.get(proto, proto)
+            if not proto or proto not in PROTOCOL_MAP.values():
+                msg = _("Invalid protocol")
+                raise exception.InvalidRequest(msg)
+            if proto == 'icmp' and allowed.get('ports'):
+                msg = _("Invalid options for icmp protocol")
+                raise exception.InvalidRequest(msg)
+
+    def _convert_to_secgroup_rules(self, firewall):
+        rules = []
+        for source_range in firewall['sourceRanges']:
+            for allowed in firewall.get('allowed', []):
+                proto = allowed['IPProtocol']
+                proto = PROTOCOL_MAP.get(proto, proto)
+                rule = {
+                    "protocol": proto,
+                    "cidr": source_range,
+                }
+                if proto == "icmp":
+                    rule["from_port"] = -1
+                    rule["to_port"] = -1
+                    rules.append(rule)
+                else:
+                    for port in allowed.get('ports', []):
+                        if "-" in port:
+                            from_port, to_port = port.split("-")
+                        else:
+                            from_port = to_port = port
+                        rule["from_port"] = from_port
+                        rule["to_port"] = to_port
+                        rules.append(copy.copy(rule))
+        return rules
+
+    def get_network_firewalls(self, context, network_name):
+        firewalls = self.get_items(context, None)
+        return [f for f in firewalls
+                if f.get("network_name", None) == network_name]
+
+    def delete_network_firewalls(self, context, network):
+        network_name = network["name"]
+        client = clients.nova(context)
+        for secgroup in self.get_network_firewalls(context, network_name):
+            try:
+                client.security_groups.delete(secgroup["id"])
+            except Exception:
+                LOG.exception(("Failed to delete security group (%s) while "
+                               "deleting network (%s)"),
+                              secgroup["name"], network_name)
diff --git a/gceapi/api/firewalls.py b/gceapi/api/firewalls.py
new file mode 100644
index 0000000..933a888
--- /dev/null
+++ b/gceapi/api/firewalls.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from gceapi.api import common as gce_common
+from gceapi.api import firewall_api
+from gceapi.api import scopes
+from gceapi.api import wsgi as gce_wsgi
+
+
+class Controller(gce_common.Controller):
+    """GCE Firewall controller"""
+
+    def __init__(self, *args, **kwargs):
+        super(Controller, self).__init__(firewall_api.API(), *args, **kwargs)
+
+    def format_item(self, request, firewall, scope):
+        result_dict = {
+            "creationTimestamp": firewall.get("creationTimestamp", ""),
+            "name": firewall["name"],
+            "description": firewall["description"],
+            "sourceRanges": firewall["sourceRanges"],
+            "allowed": firewall["allowed"],
+        }
+
+        network = firewall.get("network_name")
+        if network:
+            result_dict["network"] = self._qualify(request,
+                "networks", network, scopes.GlobalScope())
+
+        return self._format_item(request, result_dict, scope)
+
+
+def create_resource():
+    return gce_wsgi.GCEResource(Controller())
diff --git a/gceapi/api/image_api.py b/gceapi/api/image_api.py
new file mode 100644
index 0000000..0e56640
--- /dev/null
+++ b/gceapi/api/image_api.py
@@ -0,0 +1,142 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
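A worked example (values invented) of the fan-out performed by _convert_to_secgroup_rules above: each (sourceRange, allowed port) pair becomes one security-group rule, with port ranges split on "-":

firewall = {
    "sourceRanges": ["10.0.0.0/24", "192.168.1.0/24"],
    "allowed": [{"IPProtocol": "tcp", "ports": ["80", "8000-8080"]}],
}
# _convert_to_secgroup_rules(firewall) would yield four rules, in this order:
# [{"protocol": "tcp", "cidr": "10.0.0.0/24",
#   "from_port": "80", "to_port": "80"},
#  {"protocol": "tcp", "cidr": "10.0.0.0/24",
#   "from_port": "8000", "to_port": "8080"},
#  {"protocol": "tcp", "cidr": "192.168.1.0/24",
#   "from_port": "80", "to_port": "80"},
#  {"protocol": "tcp", "cidr": "192.168.1.0/24",
#   "from_port": "8000", "to_port": "8080"}]
# Note the ports stay as the strings parsed from the request body.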
+ +try: + from glanceclient import exc as glanceclient_exc +except ImportError: + glanceclient_exc = None + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + + +class API(base_api.API): + """GCE Image API.""" + + KIND = "image" + PERSISTENT_ATTRIBUTES = ["id", "description", "image_ref"] + + _status_map = { + "queued": "PENDING", + "saving": "PENDING", + "active": "READY", + "killed": "FAILED", + # "deleted": "", + # "pending_delete": "" + } + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + operation_api.API().register_get_progress_method( + "image-add", + self._get_add_item_progress) + operation_api.API().register_get_progress_method( + "image-delete", + self._get_delete_item_progress) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + image_service = clients.glance(context).images + images = image_service.list( + filters={"name": name, "disk_format": "raw"}) + result = None + for image in images: + if image.status == "deleted": + continue + if result: + msg = _("Image resource '%s' found more than once") % name + raise exception.NotFound(msg) + result = self._prepare_image(utils.to_dict(image)) + db_image = self._get_db_item_by_id(context, result["id"]) + self._prepare_item(result, db_image) + if not result: + msg = _("Image resource '%s' could not be found") % name + raise exception.NotFound(msg) + return result + + def get_items(self, context, scope=None): + image_service = clients.glance(context).images + images = image_service.list(filters={"disk_format": "raw"}) + items = list() + gce_images = self._get_db_items_dict(context) + for image in images: + result = self._prepare_image(utils.to_dict(image)) + self._prepare_item(result, gce_images.get(result["id"])) + items.append(result) + self._purge_db(context, items, gce_images) + return items + + def _prepare_image(self, item): + item["status"] = self._status_map.get(item["status"], item["status"]) + return item + + def delete_item(self, context, name, scope=None): + """Delete an image, if allowed.""" + image = self.get_item(context, name, scope) + image_service = clients.glance(context).images + operation_util.start_operation(context, + self._get_delete_item_progress, + image["id"]) + image_service.delete(image["id"]) + self._delete_db_item(context, image) + + def add_item(self, context, name, body, scope=None): + name = body['name'] + image_ref = body['rawDisk']['source'] + meta = { + 'name': name, + 'disk_format': 'raw', + 'container_format': 'bare', + 'min_disk': 0, + 'min_ram': 0, + 'copy_from': image_ref, + } + image_service = clients.glance(context).images + operation_util.start_operation(context, self._get_add_item_progress) + image = image_service.create(**meta) + operation_util.set_item_id(context, image.id) + + new_image = self._prepare_image(utils.to_dict(image)) + new_image["description"] = body.get("description", "") + new_image["image_ref"] = image_ref + new_image = self._add_db_item(context, new_image) + return new_image + + def _get_add_item_progress(self, context, image_id): + image_service = clients.glance(context).images + try: + image = image_service.get(image_id) + except glanceclient_exc.HTTPNotFound: + return operation_api.gef_final_progress() + if 
image.status not in ["queued", "saving"]: + return operation_api.gef_final_progress(image.status == "killed") + + def _get_delete_item_progress(self, context, image_id): + image_service = clients.glance(context).images + try: + image = image_service.get(image_id) + except glanceclient_exc.HTTPNotFound: + return operation_api.gef_final_progress() + if image.status not in ["pending_delete", "deleted"]: + return operation_api.gef_final_progress(True) diff --git a/gceapi/api/images.py b/gceapi/api/images.py new file mode 100644 index 0000000..807263d --- /dev/null +++ b/gceapi/api/images.py @@ -0,0 +1,44 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import common as gce_common +from gceapi.api import image_api +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Image controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(image_api.API(), *args, **kwargs) + + def format_item(self, request, image, scope): + result_dict = { + "creationTimestamp": self._format_date(image["created_at"]), + "name": image["name"], + "sourceType": image["disk_format"].upper(), + "rawDisk": { + "containerType": "TAR", + "source": image.get("image_ref", ""), + }, + "status": image["status"], + "archiveSizeBytes": image["size"], + "description": image.get("description", "") + } + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/instance_address_api.py b/gceapi/api/instance_address_api.py new file mode 100644 index 0000000..ff0433a --- /dev/null +++ b/gceapi/api/instance_address_api.py @@ -0,0 +1,162 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
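For quick reference, the _status_map in image_api above reduces Glance's image lifecycle to GCE's three visible states and passes anything else (such as pending_delete) through unchanged, exactly as _prepare_image does. A tiny self-contained sketch of that translation:

_status_map = {
    "queued": "PENDING",
    "saving": "PENDING",
    "active": "READY",
    "killed": "FAILED",
}

def to_gce_image_status(glance_status):
    # Unknown Glance states fall through unmodified.
    return _status_map.get(glance_status, glance_status)

assert to_gce_image_status("saving") == "PENDING"
assert to_gce_image_status("pending_delete") == "pending_delete"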
+ +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_util +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class API(base_api.API): + """GCE Access config API.""" + + KIND = "access_config" + PERSISTENT_ATTRIBUTES = ["id", "instance_name", + "nic", "name", "type", "addr"] + DEFAULT_ACCESS_CONFIG_TYPE = "ONE_TO_ONE_NAT" + DEFAULT_ACCESS_CONFIG_NAME = "External NAT" + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, instance_name, name): + items = self._get_db_items(context) + items = [i for i in items + if i["instance_name"] == instance_name and i["name"] == name] + if len(items) != 1: + raise exception.NotFound + return items[0] + + def get_items(self, context, instance_name): + items = self._get_db_items(context) + return [i for i in items if i["instance_name"] == instance_name] + + def add_item(self, context, instance_name, nic, addr, addr_type, name): + if not nic: + msg = _("Network interface is invalid or empty") + raise exception.InvalidRequest(msg) + + if addr_type is None: + addr_type = self.DEFAULT_ACCESS_CONFIG_TYPE + elif addr_type != self.DEFAULT_ACCESS_CONFIG_TYPE: + msg = _("Only '%s' type of access config currently supported.")\ + % self.DEFAULT_ACCESS_CONFIG_TYPE + raise exception.InvalidRequest(msg) + + client = clients.nova(context) + instances = client.servers.list(search_opts={"name": instance_name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + + fixed_ip = None + for network in instance.addresses: + if nic != network: + continue + for address in instance.addresses[network]: + atype = address["OS-EXT-IPS:type"] + if atype == "floating": + msg = _('At most one access config currently supported.') + raise exception.InvalidRequest(msg) + if atype == "fixed": + fixed_ip = address["addr"] + + if not fixed_ip: + msg = _('Network interface not found') + raise exception.InvalidRequest(msg) + + floating_ips = client.floating_ips.list() + if addr is None: + # NOTE(apavlov): try to find an unused one + for floating_ip in floating_ips: + if floating_ip.instance_id is None: + addr = floating_ip.ip + break + else: + msg = _('There are no unused floating IPs.') + raise exception.InvalidRequest(msg) + else: + for floating_ip in floating_ips: + if floating_ip.ip != addr: + continue + if floating_ip.instance_id is None: + break + msg = _("Floating IP '%s' is already associated") % floating_ip.ip + raise exception.InvalidRequest(msg) + else: + msg = _("There is no such floating IP '%s'.") % addr + raise exception.InvalidRequest(msg) + + operation_util.start_operation(context) + instance.add_floating_ip(addr, fixed_ip) + + return self.register_item(context, instance_name, + nic, addr, addr_type, name) + + def register_item(self, context, instance_name, + nic, addr, addr_type, name): + if not nic: + msg = _("Network interface is invalid or empty") + raise exception.InvalidRequest(msg) + + if addr_type is None: + addr_type = self.DEFAULT_ACCESS_CONFIG_TYPE + elif addr_type != self.DEFAULT_ACCESS_CONFIG_TYPE: + msg = _("Only '%s' type of access config currently supported.")\ + % self.DEFAULT_ACCESS_CONFIG_TYPE + raise exception.InvalidRequest(msg) + + if name is None: + name =
self.DEFAULT_ACCESS_CONFIG_NAME + if not addr: + msg = _("There is no address to assign.") + raise exception.InvalidRequest(msg) + + new_item = { + "id": instance_name + "-" + addr, + "instance_name": instance_name, + "nic": nic, + "name": name, + "type": addr_type, + "addr": addr + } + new_item = self._add_db_item(context, new_item) + return new_item + + def delete_item(self, context, instance_name, name): + client = clients.nova(context) + instances = client.servers.list(search_opts={"name": instance_name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + + item = self.get_item(context, instance_name, name) + floating_ip = item["addr"] + operation_util.start_operation(context) + instance.remove_floating_ip(floating_ip) + self._delete_db_item(context, item) + + def unregister_item(self, context, instance_name, name): + item = self.get_item(context, instance_name, name) + self._delete_db_item(context, item) diff --git a/gceapi/api/instance_api.py b/gceapi/api/instance_api.py new file mode 100644 index 0000000..34b71c0 --- /dev/null +++ b/gceapi/api/instance_api.py @@ -0,0 +1,377 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import string + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import disk_api +from gceapi.api import firewall_api +from gceapi.api import instance_address_api +from gceapi.api import instance_disk_api +from gceapi.api import machine_type_api +from gceapi.api import network_api +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import project_api +from gceapi.api import scopes +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class API(base_api.API): + """GCE Instance API.""" + + KIND = "instance" + PERSISTENT_ATTRIBUTES = ["id", "description"] + + _status_map = { + "UNKNOWN": "STOPPED", + "ACTIVE": "RUNNING", + "REBOOT": "RUNNING", + "HARD_REBOOT": "RUNNING", + "PASSWORD": "RUNNING", + "REBUILD": "RUNNING", + "MIGRATING": "RUNNING", + "RESIZE": "RUNNING", + "BUILD": "PROVISIONING", + "SHUTOFF": "STOPPED", + "VERIFY_RESIZE": "RUNNING", + "REVERT_RESIZE": "RUNNING", + "PAUSED": "STOPPED", + "SUSPENDED": "STOPPED", + "RESCUE": "RUNNING", + "ERROR": "STOPPED", + "DELETED": "TERMINATED", + "SOFT_DELETED": "TERMINATED", + "SHELVED": "STOPPED", + "SHELVED_OFFLOADED": "STOPPED", + } + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + network_api.API()._register_callback( + base_api._callback_reasons.check_delete, + self._can_delete_network) + firewall_api.API()._register_callback( + base_api._callback_reasons.post_add, + self._add_secgroup_to_instances) + firewall_api.API()._register_callback( + base_api._callback_reasons.pre_delete, + self._remove_secgroup_from_instances) + 
operation_api.API().register_get_progress_method( + "instance-add", + self._get_add_item_progress) + operation_api.API().register_get_progress_method( + "instance-delete", + self._get_delete_item_progress) + operation_api.API().register_get_progress_method( + "instance-reset", + self._get_reset_instance_progress) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + return self.search_items(context, {"name": name}, scope)[0] + + def get_items(self, context, scope=None): + return self.search_items(context, None, scope) + + def get_scopes(self, context, item): + return [scopes.ZoneScope(item["OS-EXT-AZ:availability_zone"])] + + def search_items(self, context, search_opts, scope): + client = clients.nova(context) + instances = client.servers.list(search_opts=search_opts) + + filtered_instances = [] + for instance in instances: + iscope = getattr(instance, "OS-EXT-AZ:availability_zone") + if scope is not None and scope.get_name() != iscope: + continue + + instance = utils.to_dict(instance) + instance = self._prepare_instance(client, context, instance) + db_instance = self._get_db_item_by_id(context, instance["id"]) + self._prepare_item(instance, db_instance) + filtered_instances.append(instance) + + if len(filtered_instances) == len(instances) and not search_opts: + gce_instances = self._get_db_items_dict(context) + self._purge_db(context, filtered_instances, gce_instances) + + return filtered_instances + + def _prepare_instance(self, client, context, instance): + instance["statusMessage"] = instance["status"] + instance["status"] = self._status_map.get( + instance["status"], "STOPPED") + instance["flavor"]["name"] = machine_type_api.API().get_item_by_id( + context, instance["flavor"]["id"])["name"] + + cinder_client = clients.cinder(context) + volumes = instance["os-extended-volumes:volumes_attached"] + instance["volumes"] = [utils.to_dict( + cinder_client.volumes.get(v["id"])) for v in volumes] + ads = instance_disk_api.API().get_items(context, instance["name"]) + ads = dict((ad["volume_id"], ad) for ad in ads) + for volume in instance["volumes"]: + ad = ads.pop(volume["id"], None) + if not ad: + name = volume["display_name"] + ad = instance_disk_api.API().register_item(context, + instance["name"], volume["id"], name) + volume["device_name"] = ad["name"] + # NOTE(apavlov): cleanup unused from db for this instance + for ad in ads: + ad = instance_disk_api.API().unregister_item(context, + instance["name"], ads[ad]["name"]) + + acs = instance_address_api.API().get_items(context, instance["name"]) + acs = dict((ac["addr"], ac) for ac in acs) + for network in instance["addresses"]: + for address in instance["addresses"][network]: + if address["OS-EXT-IPS:type"] == "floating": + ac = acs.pop(address["addr"], None) + if not ac: + ac = instance_address_api.API().register_item(context, + instance["name"], network, address["addr"], + None, None) + address["name"] = ac["name"] + address["type"] = ac["type"] + # NOTE(apavlov): cleanup unused from db for this instance + for ac in acs: + ac = instance_address_api.API().unregister_item(context, + instance["name"], acs[ac]["name"]) + + return instance + + def _can_delete_network(self, context, network): + client = clients.nova(context) + instances = client.servers.list(search_opts=None) + for instance in instances: + if network["name"] in instance.networks: + raise exception.NetworkInUse(network_id=network["id"]) + + def 
_get_instances_with_network(self, context, network_name, scope): + affected_instances = [] + client = clients.nova(context) + instances = client.servers.list(search_opts=None) + for instance in instances: + if network_name in instance.networks: + affected_instances.append(instance) + return affected_instances + + def _add_secgroup_to_instances(self, context, secgroup, **kwargs): + network_name = secgroup.get("network_name") + if not network_name: + return + affected_instances = self._get_instances_with_network( + context, network_name, kwargs.get("scope")) + # TODO(ft): implement common safe method + # to run add/remove with exception logging + for instance in affected_instances: + try: + instance.add_security_group(secgroup["name"]) + except Exception: + LOG.exception(("Failed to add instance " + "(%s) to security group (%s)"), + instance.id, secgroup["name"]) + + def _remove_secgroup_from_instances(self, context, secgroup, **kwargs): + network_name = secgroup.get("network_name") + if not network_name: + return + affected_instances = self._get_instances_with_network( + context, network_name, kwargs.get("scope")) + # TODO(ft): implement common safe method + # to run add/remove with exception logging + for instance in affected_instances: + try: + instance.remove_security_group(secgroup["name"]) + except Exception: + LOG.exception(("Failed to remove security group (%s) " + "from instance (%s)"), + secgroup["name"], instance.id) + + def reset_instance(self, context, scope, name): + client = clients.nova(context) + instances = client.servers.list(search_opts={"name": name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + operation_util.start_operation(context, + self._get_reset_instance_progress, + instance.id) + instance.reboot("HARD") + + def delete_item(self, context, name, scope=None): + client = clients.nova(context) + instances = client.servers.list(search_opts={"name": name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + operation_util.start_operation(context, + self._get_delete_item_progress, + instance.id) + instance.delete() + instance = utils.to_dict(instance) + instance = self._prepare_instance(client, context, instance) + self._delete_db_item(context, instance) + + ads = instance_disk_api.API().get_items(context, instance["name"]) + for ad in ads: + ad = instance_disk_api.API().unregister_item(context, + instance["name"], ad["name"]) + + acs = instance_address_api.API().get_items(context, instance["name"]) + for ac in acs: + ac = instance_address_api.API().unregister_item(context, + instance["name"], ac["name"]) + + def add_item(self, context, name, body, scope=None): + name = body['name'] + client = clients.nova(context) + + flavor_name = utils._extract_name_from_url(body['machineType']) + flavor_id = machine_type_api.API().get_item( + context, flavor_name, scope)["id"] + + try: + metadatas = body['metadata']['items'] + except KeyError: + metadatas = [] + instance_metadata = dict([(x['key'], x['value']) for x in metadatas]) + + ssh_keys = instance_metadata.pop('sshKeys', None) + if ssh_keys is not None: + key_name = ssh_keys.split('\n')[0].split(":")[0] + else: + key_name = project_api.API().get_gce_user_keypair_name(context) + + disks = body.get('disks', []) + disks.sort(None, lambda x: x.get("boot", False), True) + bdm = dict() + diskDevice = 0 + for disk in disks: + device_name = "vd" + string.ascii_lowercase[diskDevice] + volume_name =
utils._extract_name_from_url(disk["source"]) + volume = disk_api.API().get_item(context, volume_name, scope) + disk["id"] = volume["id"] + bdm[device_name] = volume["id"] + diskDevice += 1 + + nics = [] + #NOTE(ft) the 'default' security group contains egress rules, + #but egress rules are not configurable via the GCE API: + #all outgoing traffic is permitted, + #so we keep this behaviour + groups_names = set(['default']) + acs = dict() + for net_iface in body['networkInterfaces']: + net_name = utils._extract_name_from_url(net_iface["network"]) + ac = net_iface.get("accessConfigs") + if ac: + if len(ac) > 1: + msg = _('At most one access config currently supported.') + raise exception.InvalidRequest(msg) + else: + acs[net_name] = ac[0] + + network = network_api.API().get_item(context, net_name, None) + nics.append({"net-id": network["id"]}) + for sg in firewall_api.API().get_network_firewalls( + context, net_name): + groups_names.add(sg["name"]) + groups_names = list(groups_names) + + operation_util.start_operation(context, self._get_add_item_progress) + instance = client.servers.create(name, None, flavor_id, + meta=instance_metadata, min_count=1, max_count=1, + security_groups=groups_names, key_name=key_name, + availability_zone=scope.get_name(), block_device_mapping=bdm, + nics=nics) + if not acs: + operation_util.set_item_id(context, instance.id) + + for disk in disks: + instance_disk_api.API().register_item(context, name, + disk["id"], disk["deviceName"]) + + instance = utils.to_dict(client.servers.get(instance.id)) + instance = self._prepare_instance(client, context, instance) + if "description" in body: + instance["description"] = body["description"] + instance = self._add_db_item(context, instance) + + if acs: + operation_util.continue_operation( + context, + lambda: self._add_access_config(context, instance, + scope, acs)) + + return instance + + def _add_access_config(self, context, instance, scope, acs): + progress = self._get_add_item_progress(context, instance["id"]) + if progress is None or not operation_api.is_final_progress(progress): + return progress + + client = clients.nova(context) + try: + instance = client.servers.get(instance["id"]) + except clients.novaclient.exceptions.NotFound: + return operation_api.gef_final_progress() + + for net in acs: + ac = acs[net] + instance_address_api.API().add_item(context, instance.name, + net, ac.get("natIP"), ac.get("type"), ac.get("name")) + return operation_api.gef_final_progress() + + def _get_add_item_progress(self, context, instance_id): + client = clients.nova(context) + try: + instance = client.servers.get(instance_id) + except clients.novaclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if instance.status != "BUILD": + return operation_api.gef_final_progress(instance.status == "ERROR") + + def _get_delete_item_progress(self, context, instance_id): + client = clients.nova(context) + try: + instance = client.servers.get(instance_id) + except clients.novaclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if getattr(instance, "OS-EXT-STS:task_state") != "deleting": + return operation_api.gef_final_progress( + instance.status != "DELETED") + + def _get_reset_instance_progress(self, context, instance_id): + client = clients.nova(context) + try: + instance = client.servers.get(instance_id) + except clients.novaclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if instance.status != "HARD_REBOOT": + return operation_api.gef_final_progress() diff --git
a/gceapi/api/instance_disk_api.py b/gceapi/api/instance_disk_api.py new file mode 100644 index 0000000..e4fc40a --- /dev/null +++ b/gceapi/api/instance_disk_api.py @@ -0,0 +1,146 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import string + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import disk_api +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + + +class API(base_api.API): + """GCE Attached disk API.""" + + KIND = "attached_disk" + PERSISTENT_ATTRIBUTES = ["id", "instance_name", "volume_id", "name"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + operation_api.API().register_get_progress_method( + "attached_disk-add", + self._get_add_item_progress) + operation_api.API().register_get_progress_method( + "attached_disk-delete", + self._get_delete_item_progress) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, instance_name, name): + items = self._get_db_items(context) + items = [i for i in items + if i["instance_name"] == instance_name and i["name"] == name] + if len(items) != 1: + raise exception.NotFound + return items[0] + + def get_items(self, context, instance_name): + items = self._get_db_items(context) + return [i for i in items if i["instance_name"] == instance_name] + + def add_item(self, context, instance_name, source, name): + if not name: + msg = _("There is no name to assign.") + raise exception.InvalidRequest(msg) + + volume_name = utils._extract_name_from_url(source) + if not volume_name: + msg = _("There is no volume to assign.") + raise exception.NotFound(msg) + volume = disk_api.API().get_item(context, volume_name, None) + + nova_client = clients.nova(context) + instances = nova_client.servers.list( + search_opts={"name": instance_name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + + devices = list() + volumes_client = nova_client.volumes + for server_volume in volumes_client.get_server_volumes(instance.id): + devices.append(server_volume.device) + device_name = None + for letter in string.ascii_lowercase[1:]: + device_name = "vd" + letter + for device in devices: + if device_name in device: + break + else: + break + else: + raise exception.OverQuota + + operation_util.start_operation(context, self._get_add_item_progress) + volumes_client.create_server_volume( + instance.id, volume["id"], "/dev/" + device_name) + + item = self.register_item(context, instance_name, volume["id"], name) + operation_util.set_item_id(context, item["id"]) + + def register_item(self, context, instance_name, volume_id, name): + if not name: + msg = _("There is no name to assign.") + raise 
exception.InvalidRequest(msg) + if not volume_id: + msg = _("There is no volume_id to assign.") + raise exception.InvalidRequest(msg) + + new_item = { + "id": instance_name + "-" + volume_id, + "instance_name": instance_name, + "volume_id": volume_id, + "name": name, + } + new_item = self._add_db_item(context, new_item) + return new_item + + def delete_item(self, context, instance_name, name): + item = self.get_item(context, instance_name, name) + volume_id = item["volume_id"] + + nova_client = clients.nova(context) + instances = nova_client.servers.list( + search_opts={"name": instance_name}) + if not instances or len(instances) != 1: + raise exception.NotFound + instance = instances[0] + + operation_util.start_operation(context, + self._get_delete_item_progress, + item["id"]) + nova_client.volumes.delete_server_volume(instance.id, volume_id) + + self._delete_db_item(context, item) + + def unregister_item(self, context, instance_name, name): + item = self.get_item(context, instance_name, name) + self._delete_db_item(context, item) + + def _get_add_item_progress(self, context, dummy_id): + return operation_api.gef_final_progress() + + def _get_delete_item_progress(self, context, dummy_id): + return operation_api.gef_final_progress() diff --git a/gceapi/api/instances.py b/gceapi/api/instances.py new file mode 100644 index 0000000..395b2a5 --- /dev/null +++ b/gceapi/api/instances.py @@ -0,0 +1,157 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
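The attach logic above picks the first free virtio slot by scanning the devices already attached to the instance; letter "a" is skipped because vda is reserved for the boot disk. A standalone sketch of the same allocation strategy (the attached-device list is hypothetical):

import string

def pick_device_name(attached_devices):
    # attached_devices holds device paths, e.g. ["/dev/vda", "/dev/vdb"].
    for letter in string.ascii_lowercase[1:]:  # skip "a" (boot disk)
        candidate = "vd" + letter
        if not any(candidate in device for device in attached_devices):
            return candidate
    # The API raises exception.OverQuota at this point.
    raise RuntimeError("no free device letter")

assert pick_device_name(["/dev/vda", "/dev/vdb"]) == "vdc"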
+ +import webob + +from gceapi.api import common as gce_common +from gceapi.api import instance_address_api +from gceapi.api import instance_api +from gceapi.api import instance_disk_api +from gceapi.api import operation_util +from gceapi.api import scopes +from gceapi.api import wsgi as gce_wsgi +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +logger = logging.getLogger(__name__) + + +class Controller(gce_common.Controller): + """GCE Instance controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(instance_api.API(), + *args, **kwargs) + self._instance_address_api = instance_address_api.API() + self._instance_disk_api = instance_disk_api.API() + + def format_item(self, request, instance, scope): + result_dict = { + "creationTimestamp": self._format_date(instance["created"]), + "status": instance["status"], + "statusMessage": instance["statusMessage"], + "name": instance["name"], + "machineType": self._qualify(request, + "machineTypes", instance["flavor"]["name"], scope), + "networkInterfaces": [], + "disks": [], + "metadata": { + "kind": "compute#metadata", + "items": list(), + }, + } + + description = instance.get("description", "") + if description: + result_dict["description"] = description + + metadata = instance.get("metadata", {}) + for i in metadata: + result_dict["metadata"]["items"].append( + {"key": i, "value": metadata[i]}) + + for network in instance["addresses"]: + ni = dict() + ni["network"] = self._qualify(request, + "networks", network, + scopes.GlobalScope()) + # NOTE(apavlov): The name of the network interface, generated by + # the server. For network devices, these are eth0, eth1, etc. + # But we provide the network name here because OpenStack doesn't + # have a device name.
+ ni["name"] = network + ni["accessConfigs"] = [] + for address in instance["addresses"][network]: + atype = address["OS-EXT-IPS:type"] + if atype == "fixed" and "networkIP" not in ni: + ni["networkIP"] = address["addr"] + continue + if atype == "floating": + ni["accessConfigs"].append({ + "kind": "compute#accessConfig", + "name": address["name"], + "type": address["type"], + "natIP": address["addr"] + }) + continue + logger.warn(_("Unexpected address for instance '%(i)s' in " + "network '%(n)s'") % {"i": instance["name"], "n": network}) + result_dict["networkInterfaces"].append(ni) + + disk_index = 0 + for volume in instance["volumes"]: + readonly = volume.get("metadata", {}).get("readonly", "False") + google_disk = { + "kind": "compute#attachedDisk", + "index": disk_index, + "type": "PERSISTENT", + "mode": "READ_ONLY" if readonly == "True" else "READ_WRITE", + "source": self._qualify(request, + "disks", volume["display_name"], scope), + "deviceName": volume["device_name"], + "boot": True if volume["bootable"] == "true" else False + } + result_dict["disks"].append(google_disk) + disk_index += 1 + + return self._format_item(request, result_dict, scope) + + def reset_instance(self, req, scope_id, id): + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + operation_util.init_operation(context, "reset", + self._type_name, id, scope) + try: + self._api.reset_instance(context, scope, id) + except (exception.NotFound, KeyError, IndexError): + msg = _("Instance %s could not be found") % id + raise webob.exc.HTTPNotFound(explanation=msg) + + def add_access_config(self, req, body, scope_id, id): + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + operation_util.init_operation(context, "addAccessConfig", + self._type_name, id, scope) + self._instance_address_api.add_item(context, id, + req.params.get('networkInterface'), body.get("natIP"), + body.get("type"), body.get("name")) + + def delete_access_config(self, req, scope_id, id): + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + operation_util.init_operation(context, "deleteAccessConfig", + self._type_name, id, scope) + self._instance_address_api.delete_item(context, id, + req.params.get('accessConfig')) + + def attach_disk(self, req, body, scope_id, id): + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + operation_util.init_operation(context, "attachDisk", + self._type_name, id, scope) + self._instance_disk_api.add_item(context, id, + body["source"], body.get("deviceName")) + + def detach_disk(self, req, scope_id, id): + context = self._get_context(req) + scope = self._get_scope(req, scope_id) + operation_util.init_operation(context, "detachDisk", + self._type_name, id, scope) + self._instance_disk_api.delete_item(context, id, + req.params.get('deviceName')) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/machine_type_api.py b/gceapi/api/machine_type_api.py new file mode 100644 index 0000000..4ea00c0 --- /dev/null +++ b/gceapi/api/machine_type_api.py @@ -0,0 +1,68 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import utils +from gceapi.api import zone_api +from gceapi import exception + + +class API(base_api.API): + """GCE Machine types API.""" + + KIND = "machineType" + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._zone_api = zone_api.API() + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope=None): + client = clients.nova(context) + try: + item = client.flavors.find(name=self._from_gce(name)) + except (clients.novaclient.exceptions.NotFound, + clients.novaclient.exceptions.NoUniqueMatch): + raise exception.NotFound + if not item: + raise exception.NotFound + return self._prepare_item(utils.to_dict(item)) + + def get_items(self, context, scope=None): + client = clients.nova(context) + items = client.flavors.list() + return [self._prepare_item(utils.to_dict(item)) + for item in items] + + def get_scopes(self, context, item): + # TODO(apavlov): too slow for all... + return self._zone_api.get_items_as_scopes(context) + + def get_item_by_id(self, context, machine_type_id): + client = clients.nova(context) + item = client.flavors.get(machine_type_id) + return self._prepare_item(utils.to_dict(item)) + + def _prepare_item(self, item): + item["name"] = self._to_gce(item["name"]) + return item + + def _from_gce(self, name): + return name.replace("-", ".") + + def _to_gce(self, name): + return name.replace(".", "-") diff --git a/gceapi/api/machine_types.py b/gceapi/api/machine_types.py new file mode 100644 index 0000000..02b7ab0 --- /dev/null +++ b/gceapi/api/machine_types.py @@ -0,0 +1,49 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
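GCE resource names cannot contain dots, so the API above exposes OpenStack flavor names with dots swapped for dashes and reverses the substitution on lookup. A tiny sketch of the round trip ("m1.small" is just an example flavor name):

def _to_gce(name):
    return name.replace(".", "-")

def _from_gce(name):
    return name.replace("-", ".")

assert _to_gce("m1.small") == "m1-small"
assert _from_gce(_to_gce("m1.small")) == "m1.small"

Note that the mapping is lossy for flavors whose names already contain dashes: a flavor literally named "m1-small" becomes unreachable through the GCE API, since lookups always convert dashes back to dots.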
+ +from gceapi.api import common as gce_common +from gceapi.api import machine_type_api +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Machine types controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(machine_type_api.API(), + *args, **kwargs) + + def format_item(self, request, flavor, scope): + result_dict = { + "name": flavor["name"], + "description": "", + "guestCpus": flavor["vcpus"], + "memoryMb": flavor["ram"], + "imageSpaceGb": flavor["disk"], + # NOTE(Alex): Is not supported by Openstack + "maximumPersistentDisks": 0, + # NOTE(Alex): Is not supported by Openstack + "maximumPersistentDisksSizeGb": 0, + } + + if "OS-FLV-EXT-DATA:ephemeral" in flavor: + size = flavor["OS-FLV-EXT-DATA:ephemeral"] + if size > 0: + result_dict["scratchDisks"] = [{"diskGb": size}] + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/network_api.py b/gceapi/api/network_api.py new file mode 100644 index 0000000..819edd9 --- /dev/null +++ b/gceapi/api/network_api.py @@ -0,0 +1,26 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi.api import network_neutron_api +from gceapi.api import network_nova_api + + +class API(base_api.API): + """GCE Network API.""" + + NEUTRON_API_MODULE = network_neutron_api + NOVA_API_MODULE = network_nova_api + + __metaclass__ = base_api.NetSingleton diff --git a/gceapi/api/network_neutron_api.py b/gceapi/api/network_neutron_api.py new file mode 100644 index 0000000..7ebe9d4 --- /dev/null +++ b/gceapi/api/network_neutron_api.py @@ -0,0 +1,143 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
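base_api.NetSingleton is defined elsewhere in this patch, so its exact mechanics are not shown here; judging from its use in network_api above, it instantiates the Neutron-backed implementation when Neutron is the configured network service and the nova-network one otherwise, reusing the object afterwards. A rough sketch of that dispatch pattern under those assumptions (the config check and the two stub classes are hypothetical, not the actual base_api code):

class _NeutronNetApi(object):
    """Stub standing in for network_neutron_api.API (sketch only)."""

class _NovaNetApi(object):
    """Stub standing in for network_nova_api.API (sketch only)."""

def _neutron_is_configured():
    # Placeholder for the real detection (service catalog or config
    # lookup); an assumption of this sketch.
    return True

class NetSingletonSketch(type):
    """Assumed behaviour of base_api.NetSingleton: choose the backend
    once, on first instantiation, and reuse it afterwards."""
    _instance = None

    def __call__(cls, *args, **kwargs):
        if cls._instance is None:
            impl = _NeutronNetApi if _neutron_is_configured() else _NovaNetApi
            cls._instance = impl(*args, **kwargs)
        return cls._instance

class NetworkApiSketch(object):
    __metaclass__ = NetSingletonSketch  # Python 2 syntax, as in the patch

assert NetworkApiSketch() is NetworkApiSketch()
assert isinstance(NetworkApiSketch(), _NeutronNetApi)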
+ +import netaddr +from oslo.config import cfg + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_util +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class API(base_api.API): + """GCE Network API - neutron implementation.""" + + KIND = "network" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._public_network_name = CONF.public_network + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + client = clients.neutron(context) + networks = client.list_networks( + tenant_id=context.project_id, name=name)["networks"] + if not networks: + msg = _("Network resource '%s' could not be found.") % name + raise exception.NotFound(msg) + else: + # NOTE(Alex) There might be more than one network with this name. + # TODO(Alex) We have to decide if we should support IDs as + # parameters for names as well and return error if we have + # multi-results when addressed by name. + network = networks[0] + gce_network = self._get_db_item_by_id(context, network["id"]) + return self._prepare_network(client, network, gce_network) + + def get_items(self, context, scope=None): + client = clients.neutron(context) + networks = client.list_networks(tenant_id=context.project_id) + networks = networks["networks"] + gce_networks = self._get_db_items_dict(context) + result_networks = [] + for network in networks: + network = self._prepare_network(client, network, + gce_networks.get(network["id"])) + result_networks.append(network) + self._purge_db(context, result_networks, gce_networks) + return result_networks + + def delete_item(self, context, name, scope=None): + client = clients.neutron(context) + network = self.get_item(context, name) + + self._process_callbacks( + context, base_api._callback_reasons.check_delete, network) + operation_util.start_operation(context) + self._delete_db_item(context, network) + self._process_callbacks( + context, base_api._callback_reasons.pre_delete, network) + + client.delete_network(network["id"]) + + def add_item(self, context, name, body, scope=None): + ip_range = body['IPv4Range'] + gateway = body.get('gatewayIPv4') + if gateway is None: + network_cidr = netaddr.IPNetwork(ip_range) + gateway_ip = netaddr.IPAddress(network_cidr.first + 1) + gateway = str(gateway_ip) + client = clients.neutron(context) + network = None + try: + network = self.get_item(context, name) + except exception.NotFound: + pass + if network is not None: + raise exception.DuplicateVlan + network_body = {} + network_body["network"] = {"name": name} + operation_util.start_operation(context) + network = client.create_network(network_body) + network = network["network"] + if ip_range: + subnet_body = {} + subnet_body["subnet"] = { + # NOTE(Alex) "name": name + ".default_subnet", + # Won't give it a name for now + "network_id": network["id"], + "ip_version": "4", + "cidr": ip_range, + "gateway_ip": gateway} + result_data = client.create_subnet(subnet_body) + subnet_id = result_data["subnet"]["id"] + network = self._prepare_network(client, network) + network["description"] = body.get("description") + network = self._add_db_item(context, network) + self._process_callbacks( + context, 
base_api._callback_reasons.post_add, + network, subnet_id=subnet_id) + return network + + def _prepare_network(self, client, network, db_network=None): + subnets = network['subnets'] + if subnets and len(subnets) > 0: + subnet = client.show_subnet(subnets[0]) + subnet = subnet["subnet"] + network["subnet_id"] = subnet["id"] + network["IPv4Range"] = subnet.get("cidr", None) + network["gatewayIPv4"] = subnet.get("gateway_ip", None) + return self._prepare_item(network, db_network) + + def get_public_network_id(self, context): + """Get id of public network appointed to GCE in config.""" + client = clients.neutron(context) + search_opts = {"name": self._public_network_name, + "router:external": True} + networks = client.list_networks(**search_opts)["networks"] + return networks[0]["id"] diff --git a/gceapi/api/network_nova_api.py b/gceapi/api/network_nova_api.py new file mode 100644 index 0000000..125e16d --- /dev/null +++ b/gceapi/api/network_nova_api.py @@ -0,0 +1,94 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception + + +class API(base_api.API): + """GCE Network API - nova-network implementation.""" + + KIND = "network" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description"] + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + client = clients.nova(context) + network = client.networks.find(label=name) + gce_network = self._get_db_item_by_id(context, network.id) + return self._prepare_network(utils.to_dict(network), gce_network) + + def get_items(self, context, scope=None): + client = clients.nova(context) + networks = client.networks.list() + gce_networks = self._get_db_items_dict(context) + result_networks = [] + for network in networks: + result_networks.append( + self._prepare_network(utils.to_dict(network), + gce_networks.get(network["id"]))) + self._purge_db(context, result_networks, gce_networks) + return result_networks + + def delete_item(self, context, name, scope=None): + network = self.get_item(context, name) + self._process_callbacks( + context, base_api._callback_reasons.check_delete, network) + operation_util.start_operation(context) + self._delete_db_item(context, network) + self._process_callbacks( + context, base_api._callback_reasons.pre_delete, network) + client = clients.nova(context) + client.networks.delete(network["id"]) + + def add_item(self, context, name, body, scope=None): + ip_range = body['IPv4Range'] + gateway = body.get('gatewayIPv4') + if gateway is None: + network_cidr = netaddr.IPNetwork(ip_range) + gateway_ip = netaddr.IPAddress(network_cidr.first + 1) + gateway = str(gateway_ip) + network = None + try: + network = self.get_item(context, name) + except clients.novaclient.exceptions.NotFound: + pass + if 
network is not None: + raise exception.DuplicateVlan + kwargs = {'label': name, 'cidr': ip_range, 'gateway': gateway} + client = clients.nova(context) + operation_util.start_operation(context) + network = client.networks.create(**kwargs) + network = self._prepare_network(utils.to_dict(network)) + if "description" in body: + network["description"] = body["description"] + return self._add_db_item(context, network) + + def _prepare_network(self, network, db_data=None): + return self._prepare_item({ + 'name': network['label'], + 'IPv4Range': network['cidr'], + 'gatewayIPv4': network['gateway'], + 'id': network['id']}, + db_data) diff --git a/gceapi/api/networks.py b/gceapi/api/networks.py new file mode 100644 index 0000000..b7fc791 --- /dev/null +++ b/gceapi/api/networks.py @@ -0,0 +1,40 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import common as gce_common +from gceapi.api import network_api +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Network controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(network_api.API(), *args, **kwargs) + + def format_item(self, request, network, scope): + result_dict = { + "name": network["name"], + "IPv4Range": network.get("IPv4Range", ""), + "gatewayIPv4": network.get("gatewayIPv4", ""), + "creationTimestamp": network.get("creationTimestamp", ""), + } + if "description" in network: + result_dict["description"] = network["description"] + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/oauth.py b/gceapi/api/oauth.py new file mode 100644 index 0000000..77d60fa --- /dev/null +++ b/gceapi/api/oauth.py @@ -0,0 +1,239 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
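Both network backends above derive a default gateway when the request omits gatewayIPv4: the first address after the network address of the given IPv4Range. A compact sketch of that computation with netaddr (the CIDR is an arbitrary example):

import netaddr

def default_gateway(ip_range):
    # First address after the network address, mirroring add_item
    # in both network backends.
    network_cidr = netaddr.IPNetwork(ip_range)
    return str(netaddr.IPAddress(network_cidr.first + 1))

assert default_gateway("10.240.0.0/16") == "10.240.0.1"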
+ +import base64 +import json +import time +import uuid + +from keystoneclient import exceptions +from keystoneclient.v2_0 import client as keystone_client +from oslo.config import cfg +import webob + +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging +from gceapi.openstack.common import timeutils +from gceapi import wsgi_ext as openstack_wsgi + +FLAGS = cfg.CONF +LOG = logging.getLogger(__name__) + + +INTERNAL_GCUTIL_PROJECTS = ["debian-cloud", "centos-cloud", "google"] + + +class OAuthFault(openstack_wsgi.Fault): + """Fault compliant with RFC 6749 + + Prevents the extra info added by the openstack.wsgi.Fault class + from being included in the response, which would not be compliant + with RFC 6749. + """ + @webob.dec.wsgify(RequestClass=openstack_wsgi.Request) + def __call__(self, req): + return self.wrapped_exc + + +class Controller(object): + """Simple OAuth2.0 Controller + + If you need other apps to work with the GCE API you should add them + here in VALID_CLIENTS. + Based on https://developers.google.com/accounts/docs/OAuth2InstalledApp + and on RFC 6749 (paragraph 4.1). + """ + + AUTH_TIMEOUT = 300 + VALID_CLIENTS = { + "32555940559.apps.googleusercontent.com": "ZmssLNjJy2998hD4CTg2ejr2"} + + INTERNAL_REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob" + AUTH_PAGE_TEMPLATE =\ + "<html><body>"\ + "<form action=\"approval\" method=\"post\">"\ + "Enter Openstack username and password to access GCE API<br>"\ + "<input type=\"hidden\" name=\"redirect_uri\" value=\"{redirect_uri}\">"\ + "<input type=\"hidden\" name=\"code\" value=\"{code}\">"\ + "Username: <input type=\"text\" name=\"username\"><br>"\ + "Password: <input type=\"password\" name=\"password\"><br>"\ + "<input type=\"submit\" value=\"Login\">"\ + "</form></body></html>" + + class Client: + auth_start_time = 0 + auth_token = None + expires_in = 1 + + # NOTE(apavlov): there is no cleaning of the dictionary + _clients = {} + + def _check_redirect_uri(self, uri): + if uri is None: + msg = _("redirect_uri should be present") + raise webob.exc.HTTPBadRequest(explanation=msg) + if "localhost" not in uri and uri != self.INTERNAL_REDIRECT_URI: + msg = _("redirect_uri has an invalid format: " + "it must conform to an installed application URI of GCE") + json_body = {"error": "invalid_request", + "error_description": msg} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + def auth(self, req): + """OAuth protocol authorization endpoint handler + + Returns the login authorization webpage, invoked for example by + gcutil auth. + """ + client_id = req.GET.get("client_id") + if client_id is None or client_id not in self.VALID_CLIENTS: + json_body = {"error": "unauthorized_client"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + if req.GET.get("response_type") != "code": + json_body = {"error": "unsupported_response_type"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + self._check_redirect_uri(req.GET.get("redirect_uri")) + + code = base64.urlsafe_b64encode(uuid.uuid4().bytes).replace('=', '') + self._clients[code] = self.Client() + self._clients[code].auth_start_time = time.time() + + html_page = self.AUTH_PAGE_TEMPLATE.format( + redirect_uri=req.GET.get("redirect_uri"), + code=code) + return html_page + + def approval(self, req): + """OAuth protocol authorization endpoint handler, second part + + Returns a webpage with the verification code or redirects to the + redirect_uri provided in the auth request. + """ + code = req.POST.get("code") + if code is None: + json_body = {"error": "invalid_request"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + client = self._clients.get(code) + if client is None: + json_body = {"error": "invalid_client"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + if time.time() - client.auth_start_time > self.AUTH_TIMEOUT: + raise webob.exc.HTTPRequestTimeout() + + redirect_uri = req.POST.get("redirect_uri") + self._check_redirect_uri(redirect_uri) + + username = req.POST.get("username") + password = req.POST.get("password") + + try: + keystone = keystone_client.Client( + username=username, + password=password, + auth_url=FLAGS.keystone_gce_url) + token = keystone.auth_ref["token"] + client.auth_token = token["id"] + s = timeutils.parse_isotime(token["issued_at"]) + e = timeutils.parse_isotime(token["expires"]) + client.expires_in = (e - s).seconds + except Exception as ex: + return webob.exc.HTTPUnauthorized(ex) + + if redirect_uri == self.INTERNAL_REDIRECT_URI: + return "<html><body>Verification code is: "\ + + code + "</body></html>" + + uri = redirect_uri + "?code=" + code + raise webob.exc.HTTPFound(location=uri) + + def token(self, req): + """OAuth protocol token endpoint handler + + Returns json with tokens (access_token and optionally refresh_token).
+ """ + client_id = req.POST.get("client_id") + if client_id is None or client_id not in self.VALID_CLIENTS: + json_body = {"error": "unauthorized_client"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + valid_secret = self.VALID_CLIENTS[client_id] + client_secret = req.POST.get("client_secret") + if client_secret is None or client_secret != valid_secret: + json_body = {"error": "unauthorized_client"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + if req.POST.get("grant_type") != "authorization_code": + json_body = {"error": "unsupported_grant_type"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + code = req.POST.get("code") + client = self._clients.get(code) + if client is None: + json_body = {"error": "invalid_client"} + raise OAuthFault(webob.exc.HTTPBadRequest(json_body=json_body)) + + result = {"access_token": client.auth_token, + "expires_in": client.expires_in, + "token_type": "Bearer"} + return json.dumps(result) + + +class AuthProtocol(object): + """Filter for translating oauth token to keystone token.""" + def __init__(self, app): + self.app = app + self.keystone_url = FLAGS.keystone_gce_url + + def __call__(self, env, start_response): + auth_token = env.get("HTTP_AUTHORIZATION") + if auth_token is None: + return self._reject_request(start_response) + + project = env["PATH_INFO"].split("/")[1] + try: + keystone = keystone_client.Client( + token=auth_token.split()[1], + tenant_name=project, + force_new_token=True, + auth_url=self.keystone_url) + env["HTTP_X_AUTH_TOKEN"] = keystone.auth_ref["token"]["id"] + return self.app(env, start_response) + except exceptions.Unauthorized: + if project in INTERNAL_GCUTIL_PROJECTS: + # NOTE(apavlov): return empty if no such projects(by gcutil) + headers = [('Content-type', 'application/json;charset=UTF-8')] + start_response('200 Ok', headers) + return ["{}"] + + return self._reject_request(start_response) + + def _reject_request(self, start_response): + headers = [('Content-type', 'application/json;charset=UTF-8')] + start_response('401 Unauthorized', headers) + json_body = {"error": "access_denied"} + return [json.dumps(json_body)] + + +def filter_factory(global_conf, **local_conf): + def auth_filter(app): + return AuthProtocol(app) + return auth_filter + + +def create_resource(): + return openstack_wsgi.Resource(Controller()) diff --git a/gceapi/api/operation_api.py b/gceapi/api/operation_api.py new file mode 100644 index 0000000..59af04d --- /dev/null +++ b/gceapi/api/operation_api.py @@ -0,0 +1,168 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
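Taken together, the controller implements the installed-application flow: a client opens the auth page, the user posts credentials to the approval handler, and the returned code is exchanged for a bearer token at the token endpoint. A hedged sketch of that last exchange from the client side; the service URL and route are placeholders (the real paths come from the paste configuration), the client id and secret are the gcutil pair listed in VALID_CLIENTS, and the third-party requests library is used:

import requests

GCE_API_URL = "http://localhost:8777"  # placeholder host:port
CODE = "code-from-the-approval-page"   # placeholder verification code

resp = requests.post(GCE_API_URL + "/oauth/token", data={  # assumed route
    "client_id": "32555940559.apps.googleusercontent.com",
    "client_secret": "ZmssLNjJy2998hD4CTg2ejr2",
    "grant_type": "authorization_code",
    "code": CODE,
})
access_token = resp.json()["access_token"]
# Subsequent GCE API calls send "Authorization: Bearer <access_token>";
# the AuthProtocol filter above swaps it for a Keystone token.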
+ +import uuid + +from gceapi.api import base_api +from gceapi.api import scopes +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import timeutils + + +class API(base_api.API): + """GCE operation API.""" + + KIND = "operation" + PERSISTENT_ATTRIBUTES = ["id", "insert_time", "start_time", "end_time", + "name", "type", "user", "status", "progress", + "scope_type", "scope_name", + "target_type", "target_name", + "method_key", "item_id", + "error_code", "error_message", "errors"] + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + self._method_keys = {} + self._get_progress_methods = {} + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def register_get_progress_method(self, method_key, method): + if method_key in self._get_progress_methods: + raise exception.Invalid() + # TODO(ft): check 'method' formal arguments + self._method_keys[method] = method_key + self._get_progress_methods[method_key] = method + + def get_scopes(self, context, item): + return [scopes.construct(item["scope_type"], item["scope_name"])] + + def get_item(self, context, name, scope=None): + operation = self._get_db_item_by_name(context, name) + if (operation is None or + operation["scope_type"] != scope.get_type() or + operation["scope_name"] != scope.get_name()): + raise exception.NotFound + operation = self._update_operation_progress(context, operation) + return operation + + def get_items(self, context, scope=None): + operations = self._get_db_items(context) + if scope is not None: + operations = [operation for operation in operations + if (operation["scope_type"] == scope.get_type() and + operation["scope_name"] == scope.get_name())] + for operation in operations: + operation = self._update_operation_progress(context, operation) + return operations + + def delete_item(self, context, name, scope=None): + # NOTE(ft): Google deletes an operation without checking its scope + item = self._get_db_item_by_name(context, name) + if item is None: + raise exception.NotFound + self._delete_db_item(context, item) + + def _update_operation_progress(self, context, operation): + if operation["status"] == "DONE" or not operation.get("item_id"): + return operation + method_key = operation["method_key"] + get_progress = self._get_progress_methods[method_key] + operation_progress = get_progress(context, operation["item_id"]) + if operation_progress is None: + return operation + operation.update(operation_progress) + if operation["progress"] == 100: + operation["status"] = "DONE" + operation["end_time"] = timeutils.isotime(None, True) + self._update_db_item(context, operation) + return operation + + def construct_operation(self, context, op_type, target_type, target_name, + scope): + operation_id = str(uuid.uuid4()) + operation = { + "id": operation_id, + "name": "operation-" + operation_id, + "insert_time": timeutils.isotime(context.timestamp, True), + "user": context.user_name, + "type": op_type, + "target_type": target_type, + "target_name": target_name, + "scope_type": scope.get_type(), + "scope_name": scope.get_name(), + } + return operation + + def save_operation(self, context, operation, start_time, + get_progress_method, item_id, operation_result): + if isinstance(operation_result, Exception): + operation.update(_error_from_exception(operation_result)) + operation["start_time"] = start_time + method_key = self._method_keys.get(get_progress_method) + if method_key
is None or "error_code" in operation: + operation["progress"] = 100 + operation["status"] = "DONE" + operation["end_time"] = timeutils.isotime(None, True) + else: + operation["progress"] = 0 + operation["status"] = "RUNNING" + operation["method_key"] = method_key + if item_id is not None: + operation["item_id"] = item_id + return self._add_db_item(context, operation) + + def update_operation(self, context, operation_id, operation_result): + operation = self._get_db_item_by_id(context, operation_id) + if operation is None: + # NOTE(ft): it may lead to hungup not finished operation in DB + return + if isinstance(operation_result, Exception): + operation.update(_error_from_exception(operation_result)) + else: + operation.update(operation_result) + if operation["progress"] == 100 or "error_code" in operation: + operation["status"] = "DONE" + operation["end_time"] = timeutils.isotime(None, True) + operation.update(operation) + self._update_db_item(context, operation) + + +def gef_final_progress(with_error=False): + progress = {"progress": 100} + if with_error: + progress["error_code"] = 500 + progress["error_message"] = _('Internal server error') + progress["errors"] = [{ + "code": "UNKNOWN_OS_ERROR", + "message": _("Operation finished with unknown error. " + "See OpenStack logs.") + }] + return progress + + +def is_final_progress(progress): + return progress is not None and (progress.get("progress") == 100 or + progress.get("error_code") is not None) + + +def _error_from_exception(ex): + return {"errors": [{"code": ex.__class__.__name__, "message": str(ex)}], + "error_code": 500, + "error_message": _('Internal server error')} diff --git a/gceapi/api/operation_util.py b/gceapi/api/operation_util.py new file mode 100644 index 0000000..c8d8b02 --- /dev/null +++ b/gceapi/api/operation_util.py @@ -0,0 +1,73 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
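+# A typical request flow through these helpers, sketched with illustrative
+# values (the controller code elsewhere in this patch drives them):
+#
+#   init_operation(context, "delete", "disk", disk_name, scope)
+#   ...
+#   start_operation(context, get_progress_method, item_id)
+#   save_operaton(context, action_result)  # persists RUNNING or DONE state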
+
+import threading
+
+from gceapi.api import operation_api
+from gceapi.openstack.common import timeutils
+
+
+def init_operation(context, op_type, target_type, target_name, scope):
+    if context.operation is not None:
+        return
+    operation = operation_api.API().construct_operation(
+        context, op_type, target_type, target_name, scope)
+    context.operation = operation
+    return operation
+
+
+def save_operaton(context, action_result):
+    if context.operation is None or context.operation_start_time is None:
+        return None
+    return operation_api.API().save_operation(
+        context,
+        context.operation,
+        context.operation_start_time,
+        context.operation_get_progress_method,
+        context.operation_item_id,
+        action_result)
+
+
+def start_operation(context, get_progress_method=None, item_id=None):
+    if context.operation is None or context.operation_start_time is not None:
+        return
+    context.operation_start_time = timeutils.isotime(None, True)
+    context.operation_get_progress_method = get_progress_method
+    context.operation_item_id = item_id
+
+
+def set_item_id(context, item_id):
+    if context.operation is None or context.operation_start_time is None:
+        return
+    context.operation_item_id = item_id
+
+
+def continue_operation(context, func, timeout=5):
+    threading.Timer(timeout, _continue_operation, [context, func]).start()
+
+
+def _continue_operation(context, func):
+    operation = context.operation
+    try:
+        operation_result = func()
+    except Exception as ex:
+        operation_result = ex
+    if operation is None:
+        return
+    if operation_result is None:
+        continue_operation(context, func, timeout=2)
+    else:
+        operation_api.API().update_operation(context, operation["id"],
+                                             operation_result)
diff --git a/gceapi/api/operations.py b/gceapi/api/operations.py
new file mode 100644
index 0000000..6395087
--- /dev/null
+++ b/gceapi/api/operations.py
@@ -0,0 +1,31 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from gceapi.api import common as gce_common
+from gceapi.api import operation_api
+from gceapi.api import wsgi as gce_wsgi
+
+
+class Controller(gce_common.Controller):
+    """GCE Operation controller"""
+
+    def __init__(self, *args, **kwargs):
+        super(Controller, self).__init__(operation_api.API(), *args, **kwargs)
+
+    def format_item(self, request, operation, scope):
+        return self._format_operation(request, operation, scope)
+
+
+def create_resource():
+    return gce_wsgi.GCEResource(Controller())
diff --git a/gceapi/api/project_api.py b/gceapi/api/project_api.py
new file mode 100644
index 0000000..5edfd8a
--- /dev/null
+++ b/gceapi/api/project_api.py
@@ -0,0 +1,103 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
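+# SSH keys live in a single "sshKeys" metadata item, one
+# "<user_name>:<public_key>" pair per line, e.g. (illustrative value):
+#
+#   {"key": "sshKeys",
+#    "value": "alice:ssh-rsa AAAAB3Nza... alice@example\n"
+#             "bob:ssh-rsa AAAAB3Nza... bob@example"}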
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception + + +class API(base_api.API): + """GCE Projects API.""" + + KIND = "project" + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope=None): + project_name = context.project_name + + keystone = clients.keystone(context) + project = [t for t in keystone.tenants.list() + if t.name == project_name][0] + + result = utils.to_dict(project) + result["keypair"] = self._get_gce_keypair(context) + project_id = project.id + + nova_limits = clients.nova(context).limits.get(tenant_id=project_id) + result["nova_limits"] = dict((l.name, l.value) + for l in nova_limits.absolute) + + cinder_client = clients.cinder(context) + result["cinder_quotas"] = utils.to_dict( + cinder_client.quotas.get(project_id, usage=True)) + + neutron_client = clients.neutron(context) + result["neutron_quota"] = ( + neutron_client.show_quota(project_id)["quota"]) + result["neutron_quota"]["network_used"] = len(neutron_client + .list_networks(tenant_id=project_id)["networks"]) + result["neutron_quota"]["floatingip_used"] = len(neutron_client + .list_floatingips(tenant_id=project_id)["floatingips"]) + result["neutron_quota"]["security_group_used"] = len(neutron_client + .list_security_groups(tenant_id=project_id)["security_groups"]) + return result + + def get_items(self, context, scope=None): + raise exception.NotFound + + def set_common_instance_metadata(self, context, metadata_list): + instance_metadata = dict( + [(x['key'], x['value']) for x in metadata_list]) + operation_util.start_operation(context) + ssh_keys = instance_metadata.pop('sshKeys', None) + if ssh_keys: + nova_client = clients.nova(context) + for key_data in ssh_keys.split('\n'): + user_name, ssh_key = key_data.split(":") + self._update_key(nova_client, user_name, ssh_key) + + def get_gce_user_keypair_name(self, context): + client = clients.nova(context) + for keypair in client.keypairs.list(): + if keypair.name == context.user_name: + return keypair.name + + return None + + def _get_gce_keypair(self, context): + client = clients.nova(context) + key_datas = [] + for keypair in client.keypairs.list(): + key_datas.append(keypair.name + ':' + keypair.public_key) + + if not key_datas: + return None + + return {'key': 'sshKeys', 'value': "\n".join(key_datas)} + + def _update_key(self, nova_client, user_name, ssh_key): + try: + keypair = nova_client.keypairs.get(user_name) + if keypair.public_key == ssh_key: + return + + keypair.delete() + except clients.novaclient.exceptions.NotFound: + pass + + keypair = nova_client.keypairs.create(user_name, ssh_key) diff --git a/gceapi/api/projects.py b/gceapi/api/projects.py new file mode 100644 index 0000000..5b3e178 --- /dev/null +++ b/gceapi/api/projects.py @@ -0,0 +1,104 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
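+# Each quota is reported in GCE form; _add_quota() below emits entries like
+# (values illustrative):
+#
+#   {"metric": "CPU", "limit": 20.0, "usage": 4.0}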
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from gceapi import exception + +from gceapi.api import common as gce_common +from gceapi.api import operation_util +from gceapi.api import project_api +from gceapi.api import scopes +from gceapi.api import wsgi as gce_wsgi +from gceapi.openstack.common.gettextutils import _ + + +class Controller(gce_common.Controller): + """GCE Projects controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(project_api.API(), *args, **kwargs) + + def format_item(self, request, project, scope): + desc = project["description"] + result_dict = { + "name": project["name"], + "description": desc if desc else "", + "commonInstanceMetadata": { + "kind": "compute#metadata", + "items": [project["keypair"]] + } if project["keypair"] else { + "kind": "compute#metadata", + }, + "quotas": [] + } + + self._add_quota(result_dict["quotas"], "CPU", + project["nova_limits"].get("maxTotalCores", -1), + project["nova_limits"].get("totalCoresUsed", -1)) + self._add_quota(result_dict["quotas"], "INSTANCES", + project["nova_limits"].get("maxTotalInstances", -1), + project["nova_limits"].get("totalInstancesUsed", -1)) + + quota = project["cinder_quotas"].get("gigabytes", {}) + self._add_quota(result_dict["quotas"], "DISKS_TOTAL_GB", + quota.get("limit", -1), quota.get("in_use", -1)) + quota = project["cinder_quotas"].get("snapshots", {}) + self._add_quota(result_dict["quotas"], "SNAPSHOTS", + quota.get("limit", -1), quota.get("in_use", -1)) + quota = project["cinder_quotas"].get("volumes", {}) + self._add_quota(result_dict["quotas"], "DISKS", + quota.get("limit", -1), quota.get("in_use", -1)) + + self._add_quota(result_dict["quotas"], "FIREWALLS", + project["neutron_quota"].get("security_group", -1), + project["neutron_quota"].get("security_group_used", -1)) + self._add_quota(result_dict["quotas"], "STATIC_ADDRESSES", + project["neutron_quota"].get("floatingip", -1), + project["neutron_quota"].get("floatingip_used", -1)) + self._add_quota(result_dict["quotas"], "NETWORKS", + project["neutron_quota"].get("network", -1), + project["neutron_quota"].get("network_used", -1)) + + return self._format_item(request, result_dict, scope) + + def set_common_instance_metadata(self, req, body): + context = self._get_context(req) + operation_util.init_operation(context, "setMetadata", self._type_name, + None, scopes.GlobalScope()) + try: + self._api.set_common_instance_metadata( + context, body.get("items", [])) + except exception.KeypairLimitExceeded: + msg = _("Quota exceeded, too many key pairs.") + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=msg, + headers={'Retry-After': 0}) + except exception.InvalidKeypair: + msg = _("Keypair data is invalid") + raise webob.exc.HTTPBadRequest(explanation=msg) + except exception.KeyPairExists: + msg = _("Key pair already exists.") + raise webob.exc.HTTPConflict(explanation=msg) + + def _add_quota(self, quotas, metric, limit, usage): + quotas.append({ + "metric": metric, + "limit": float(limit), + "usage": float(usage), + }) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/region_api.py 
b/gceapi/api/region_api.py
new file mode 100644
index 0000000..528fa11
--- /dev/null
+++ b/gceapi/api/region_api.py
@@ -0,0 +1,43 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from gceapi.api import base_api
+from gceapi.api import scopes
+from gceapi import exception
+
+
+class API(base_api.API):
+    """GCE Regions API
+
+    Stubbed for now to support only the single predefined region "nova".
+    """
+
+    KIND = "region"
+    _REGIONS = ["nova"]
+
+    def _get_type(self):
+        return self.KIND
+
+    def get_item(self, context, name, scope=None):
+        regions = self.get_items(context)
+        for region in regions:
+            if region["name"] == name:
+                return region
+        raise exception.NotFound
+
+    def get_items(self, context, scope=None):
+        return [{"name": region} for region in self._REGIONS]
+
+    def get_items_as_scopes(self, context):
+        return [scopes.RegionScope(region) for region in self._REGIONS]
diff --git a/gceapi/api/regions.py b/gceapi/api/regions.py
new file mode 100644
index 0000000..e7081b7
--- /dev/null
+++ b/gceapi/api/regions.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from gceapi.api import common as gce_common
+from gceapi.api import region_api
+from gceapi.api import wsgi as gce_wsgi
+from gceapi.api import zone_api
+
+
+class Controller(gce_common.Controller):
+    """GCE Regions controller."""
+
+    def __init__(self, *args, **kwargs):
+        super(Controller, self).__init__(region_api.API(), *args, **kwargs)
+        self._zone_api = zone_api.API()
+
+    def format_item(self, req, region, scope):
+        zones = self._zone_api.get_items(self._get_context(req), scope)
+        result_dict = {
+            "name": region["name"],
+            "status": "UP",
+            "zones": [self._qualify(req, "zones", zone["name"], None)
+                      for zone in zones]
+        }
+
+        return self._format_item(req, result_dict, scope)
+
+
+def create_resource():
+    return gce_wsgi.GCEResource(Controller())
diff --git a/gceapi/api/route_api.py b/gceapi/api/route_api.py
new file mode 100644
index 0000000..ba60d7c
--- /dev/null
+++ b/gceapi/api/route_api.py
@@ -0,0 +1,26 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi.api import route_neutron_api +from gceapi.api import route_nova_api + + +class API(base_api.API): + """GCE Route API.""" + + NEUTRON_API_MODULE = route_neutron_api + NOVA_API_MODULE = route_nova_api + + __metaclass__ = base_api.NetSingleton diff --git a/gceapi/api/route_neutron_api.py b/gceapi/api/route_neutron_api.py new file mode 100644 index 0000000..ce7f47a --- /dev/null +++ b/gceapi/api/route_neutron_api.py @@ -0,0 +1,409 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +import string + +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import network_api +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + + +ALL_IP_CIDR = "0.0.0.0/0" + + +class API(base_api.API): + """GCE Address API - neutron implementation.""" + + KIND = "route" + PERSISTENT_ATTRIBUTES = ["id", "creationTimestamp", "description", + "is_default"] + TRANS_TABLE = string.maketrans("./", "--") + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + network_api.API()._register_callback( + base_api._callback_reasons.post_add, + self._create_network_router) + network_api.API()._register_callback( + base_api._callback_reasons.check_delete, + self._check_delete_network) + network_api.API()._register_callback( + base_api._callback_reasons.pre_delete, + self._delete_network_router) + + def _get_type(self): + return self.KIND + + def _get_persistent_attributes(self): + return self.PERSISTENT_ATTRIBUTES + + def get_item(self, context, name, scope=None): + routes, dummy = self._sync_routes(context) + return routes[name] + + def get_items(self, context, scope=None): + routes, dummy = self._sync_routes(context) + return routes.values() + + def delete_item(self, context, name, scope=None): + routes, aliased_routes = self._sync_routes(context) + route = routes[name] + if route.get("nexthop") is None: + raise exception.InvalidInput( + _("The local route cannot be deleted.")) + destination = route["destination"] + nexthop = route["nexthop"] + # NOTE(ft): delete OS route only if it doesn't have aliases + # at the moment + client = clients.neutron(context) + operation_util.start_operation(context) + if self._get_route_key(route) not in aliased_routes: + dummy, router = self._get_network_objects(client, + route["network"]) + if "external_gateway_info" in route: + client.remove_gateway_router(router["id"]) + else: + routes = [r for r in router["routes"] + if 
(destination != r["destination"] or + nexthop != r["nexthop"])] + client.update_router( + router["id"], + {"router": {"routes": routes, }, }) + self._delete_db_item(context, route) + + def add_item(self, context, name, body, scope=None): + routes, dummy = self._sync_routes(context) + if name in routes: + raise exception.InvalidInput( + _("The resource '%s' already exists.") % name) + + # NOTE(ft): check network is plugged to router + network_name = utils._extract_name_from_url(body["network"]) + network = network_api.API().get_item(context, network_name) + + nexthop = body.get("nextHopGateway") + if (nexthop is not None and + (utils._extract_name_from_url(nexthop) == + "default-internet-gateway") and + # NOTE(ft): OS doesn't support IP mask for external gateway + body.get("destRange") == ALL_IP_CIDR): + operation_util.start_operation(context) + return self._create_internet_route(context, network, body) + + nexthop = body.get("nextHopIp") + if nexthop is not None: + operation_util.start_operation(context) + return self._create_custom_route(context, network, body) + + raise exception.InvalidInput(_("Unsupported route.")) + + def _create_internet_route(self, context, network, body): + client = clients.neutron(context) + port, router = self._get_network_objects(client, network) + public_network_id = network_api.API().get_public_network_id(context) + external_gateway_info = {"network_id": public_network_id} + router = client.add_gateway_router( + router["id"], + external_gateway_info)["router"] + gateway_port = client.list_ports( + device_id=router["id"], + device_owner="network:router_gateway")["ports"][0] + route = self._add_gce_route(context, network, port, body, + is_default=False, + destination=gateway_port["id"], + nexthop=ALL_IP_CIDR) + route["network"] = network + route["port"] = port + route["external_gateway_info"] = external_gateway_info + return route + + def _create_custom_route(self, context, network, body): + client = clients.neutron(context) + port, router = self._get_network_objects(client, network) + destination = body.get("destRange") + nexthop = body.get("nextHopIp") + routes = router["routes"] + if all(r["destination"] != destination or r["nexthop"] != nexthop + for r in routes): + routes.append({ + "destination": destination, + "nexthop": nexthop, + }) + client.update_router( + router["id"], + {"router": {"routes": router["routes"], }, }) + route = self._add_gce_route(context, network, port, body, + is_default=False, destination=destination, + nexthop=nexthop) + route["network"] = network + route["port"] = port + return route + + def _sync_routes(self, context): + os_routes = self._get_os_routes(context) + gce_routes = self._get_gce_routes(context) + aliased_routes = {} + routes = {} + for (key, os_route) in os_routes.items(): + gce_route_list = gce_routes.pop(key, None) + if gce_route_list is None: + continue + for gce_route in gce_route_list: + routes[gce_route["name"]] = dict(os_route, **dict(gce_route)) + os_routes.pop(key) + if len(gce_route_list) > 1: + aliased_routes[key] = gce_route_list + + # NOTE(ft): add new named routes + for os_route in os_routes.itervalues(): + network = os_route["network"] + port = os_route["port"] + route = self._add_gce_route(context, network, port, os_route, + is_default=True, + creationTimestamp="") + os_route.update(route) + routes[os_route["name"]] = os_route + + # NOTE(ft): delete obsolete named routes + for gce_route_list in gce_routes.itervalues(): + for gce_route in gce_route_list: + self._delete_db_item(context, gce_route) + 
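+        # routes: route name -> merged GCE/OS route description;
+        # aliased_routes: OS route key -> GCE routes sharing that OS route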
return (routes, aliased_routes) + + def _get_gce_routes(self, context): + gce_routes = self._get_db_items(context) + gce_routes_dict = {} + for route in gce_routes: + route = self._unpack_route_from_db_format(route) + key = self._get_route_key(route) + val_array = gce_routes_dict.get(key) + if val_array is None: + gce_routes_dict[key] = [route] + else: + val_array.append(route) + return gce_routes_dict + + def _get_route_key(self, route): + if route["port_id"] is None: + return route["network_id"] + else: + return (route["network_id"] + route["port_id"] + + route["destination"] + route["nexthop"]) + + def _get_os_routes(self, context): + client = clients.neutron(context) + routers = client.list_routers(tenant_id=context.project_id)["routers"] + routers = dict((r["id"], r) for r in routers) + ports = client.list_ports( + tenant_id=context.project_id, + device_owner="network:router_interface")["ports"] + ports = dict((p["network_id"], p) for p in ports) + gateway_ports = client.list_ports( + device_owner="network:router_gateway")["ports"] + gateway_ports = dict((p["device_id"], p) for p in gateway_ports) + routes = {} + networks = network_api.API().get_items(context) + for network in networks: + # NOTE(ft): append local route + network_id = network["id"] + routes[network_id] = self._init_local_route(network) + + port = ports.get(network_id) + if port is None: + continue + router = routers.get(port["device_id"]) + if router is None: + continue + key_prefix = network_id + port["id"] + + # NOTE(ft): append internet route + external_gateway_info = router.get("external_gateway_info") + gateway_port = gateway_ports.get(router["id"]) + if (external_gateway_info is not None and + gateway_port is not None): + key = key_prefix + ALL_IP_CIDR + gateway_port["id"] + routes[key] = self._init_internet_route( + network, port, gateway_port["id"], + external_gateway_info) + + # NOTE(ft): append other routes + for route in router["routes"]: + destination = route["destination"] + nexthop = route["nexthop"] + key = key_prefix + destination + nexthop + routes[key] = self._init_custom_route( + network, port, destination, nexthop) + return routes + + def _get_network_objects(self, client, network): + subnet_id = network.get("subnet_id") + if subnet_id is None: + raise exception.PortNotFound(_("Network has no router.")) + ports = client.list_ports( + network_id=network["id"], + device_owner="network:router_interface")["ports"] + port = next((p for p in ports + if any(fip["subnet_id"] == subnet_id + for fip in p["fixed_ips"])), None) + if port is None: + raise exception.PortNotFound(_("Network has no router.")) + router = client.show_router(port["device_id"])["router"] + return (port, router) + + def _create_network_router(self, context, network, subnet_id): + public_network_id = network_api.API().get_public_network_id(context) + client = clients.neutron(context) + router = client.create_router(body={"router": { + "name": network["name"], + "admin_state_up": True, + "external_gateway_info": {"network_id": public_network_id}, + }})["router"] + client.add_interface_router(router["id"], {"subnet_id": subnet_id}) + + def _check_delete_network(self, context, network): + network_id = network["id"] + # NOTE(ft): check non default routes not longer exists + # must be done for internet routes + routes, dummy = self._sync_routes(context) + for route in routes.itervalues(): + if (route["network_id"] == network_id and + not route["is_default"]): + raise exception.InvalidInput(_("Network contains routes")) + # NOTE(ft): check 
invisible routes no longer exist
+        # (needed for routes on a non-default subnet and other non-GCE stuff)
+        client = clients.neutron(context)
+        checked_routers = set()
+        subnets = client.list_subnets(network_id=network_id)["subnets"]
+        cidrs = [netaddr.IPNetwork(subnet["cidr"]) for subnet in subnets]
+        ports = client.list_ports(
+            network_id=network["id"],
+            device_owner="network:router_interface")["ports"]
+        for port in ports:
+            if port["device_id"] in checked_routers:
+                continue
+            checked_routers.add(port["device_id"])
+            router = client.show_router(port["device_id"])["router"]
+            for route in router["routes"]:
+                nexthop = netaddr.IPAddress(route["nexthop"])
+                if any(nexthop in cidr for cidr in cidrs):
+                    raise exception.InvalidInput(_("Network contains routes"))
+        # TODO(ft): this is a good place to create default routes in the DB;
+        # for now they will be created on the next 'route' request,
+        # but 'creationTimestamp' will be absent
+
+    def _delete_network_router(self, context, network):
+        client = clients.neutron(context)
+        ports = client.list_ports(
+            network_id=network["id"],
+            device_owner="network:router_interface")["ports"]
+        router_ids = set()
+        for port in ports:
+            if port["device_owner"] == "network:router_interface":
+                router_ids.add(port["device_id"])
+                client.remove_interface_router(port["device_id"],
+                                               {"port_id": port["id"]})
+        # NOTE(ft): leave the routers alone if the network is plugged into
+        # more than one router: this looks like some non-GCE setup, so we
+        # don't want to decide whether we can delete a router or not
+        if len(router_ids) != 1:
+            return
+        router = router_ids.pop()
+        # NOTE(ft): leave the router alone if other subnets are plugged to it
+        ports = client.list_ports(
+            device_id=router,
+            device_owner="network:router_interface")["ports"]
+        if len(ports) == 0:
+            client.delete_router(router)
+        # TODO(ft): this is a good place to purge the DB of routes
+
+    def _add_gce_route(self, context, network, port, route, **kwargs):
+        db_route = {}
+        for key in self.PERSISTENT_ATTRIBUTES:
+            value = route.get(key)
+            if value is None:
+                value = kwargs.get(key)
+            if value is not None or key in kwargs:
+                db_route[key] = value
+
+        def get_from_dicts(key, dict1, dict2, default=None):
+            value = dict1.get(key)
+            if value is None:
+                value = dict2.get(key)
+            return value if value is not None else default
+
+        route_id = "//".join([network["id"],
+                              port["id"] if port is not None else "",
+                              get_from_dicts("destination", route, kwargs),
+                              get_from_dicts("nexthop", route, kwargs, ""),
+                              get_from_dicts("name", route, kwargs)])
+        db_route["id"] = route_id
+        db_route = self._add_db_item(context, db_route)
+        return self._unpack_route_from_db_format(db_route)
+
+    def _unpack_route_from_db_format(self, route):
+        parts = route["id"].split("//")
+        route["network_id"] = parts[0]
+        route["port_id"] = parts[1] if parts[1] != "" else None
+        route["destination"] = parts[2]
+        route["nexthop"] = parts[3] if parts[3] != "" else None
+        route["name"] = parts[4]
+        return route
+
+    def _init_local_route(self, network):
+        return {
+            "id": None,
+            "name": "default-route-%s-local" % network["id"],
+            "description": "Default route to the virtual network.",
+            "network": network,
+            "port": None,
+            "destination": network.get("IPv4Range", ""),
+            "nexthop": None,
+            "is_default": True,
+        }
+
+    def _init_internet_route(self, network, port, nexthop, gateway_info):
+        return {
+            "id": None,
+            "name": "default-route-%s-internet" % network["id"],
+            "description": "Default route to the Internet.",
+            "network": network,
+            "port": port,
+
"destination": ALL_IP_CIDR, + "nexthop": nexthop, + "is_default": True, + "external_gateway_info": gateway_info, + } + + def _init_custom_route(self, network, port, destination, nexthop): + name = ("custom-route-%(nw)s-dst-%(dst)s-gw-%(nh)s" % + { + "nw": network["id"], + "dst": destination, + "nh": nexthop, + }) + name = str(name).translate(self.TRANS_TABLE) + return { + "id": None, + "name": name, + "network": network, + "port": port, + "destination": destination, + "nexthop": nexthop, + "is_default": False, + } diff --git a/gceapi/api/route_nova_api.py b/gceapi/api/route_nova_api.py new file mode 100644 index 0000000..7e8b35f --- /dev/null +++ b/gceapi/api/route_nova_api.py @@ -0,0 +1,41 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import base_api +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + + +NOT_SUPPORTED_MESSAGE = _("Routes are not supported with nova network") + + +class API(base_api.API): + """GCE Address API - nova-network implementation.""" + + KIND = "route" + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope_id=None): + raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE) + + def get_items(self, context, scope_id=None): + raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE) + + def delete_item(self, context, name, scope_id=None): + raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE) + + def add_item(self, context, name, body, scope_id=None): + raise exception.InvalidInput(reason=NOT_SUPPORTED_MESSAGE) diff --git a/gceapi/api/routes.py b/gceapi/api/routes.py new file mode 100644 index 0000000..46e3901 --- /dev/null +++ b/gceapi/api/routes.py @@ -0,0 +1,52 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from gceapi.api import common as gce_common +from gceapi.api import route_api +from gceapi.api import wsgi as gce_wsgi + + +class Controller(gce_common.Controller): + """GCE Route controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(route_api.API(), *args, **kwargs) + + def format_item(self, request, route, scope): + network_name = self._qualify( + request, "networks", route["network"]["name"], None) + result_dict = { + "name": route["name"], + "network": network_name, + "destRange": route.get("destination"), + "creationTimestamp": route.get("creationTimestamp", ""), + "priority": 1000, + } + if "external_gateway_info" in route: + result_dict["nextHopGateway"] = self._qualify( + request, "gateways", "default-internet-gateway", scope) + else: + nextHop = route.get("nexthop") + if nextHop is not None: + result_dict["nextHopIp"] = nextHop + else: + result_dict["nextHopNetwork"] = network_name + if "description" in route: + result_dict["description"] = route["description"] + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/scopes.py b/gceapi/api/scopes.py new file mode 100644 index 0000000..87d7269 --- /dev/null +++ b/gceapi/api/scopes.py @@ -0,0 +1,120 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from webob import exc + +from gceapi.api import base_api +from gceapi.api import utils + + +class Scope(object): + """Scope that contains resource. + + The following scopes exists: global, aggregated, zones, regions. 
+ """ + + _type = None + _collection = None + _name = None + + @abc.abstractmethod + def __init__(self, scope_name): + self._name = scope_name + + def get_type(self): + return self._type + + def get_name(self): + return self._name + + def get_collection(self): + return self._collection + + def get_path(self): + if self._collection is not None and self._name is not None: + return "/".join([self._collection, self._name]) + else: + return self._type + + def get_scope_api(self): + base_api.Singleton.get_instance(self.get_type()) + + +class GlobalScope(Scope): + + _type = "global" + + def __init__(self): + super(GlobalScope, self).__init__(None) + + +class AggregatedScope(Scope): + + _type = "aggregated" + + def __init__(self): + super(AggregatedScope, self).__init__(None) + + +class ZoneScope(Scope): + + _type = "zone" + _collection = utils.get_collection_name(_type) + + def __init__(self, scope_name): + super(ZoneScope, self).__init__(scope_name) + + +class RegionScope(Scope): + + _type = "region" + _collection = utils.get_collection_name(_type) + + def __init__(self, scope_name): + super(RegionScope, self).__init__(scope_name) + + +def construct(scope_type, scope_id): + if scope_type == "zone": + return ZoneScope(scope_id) + elif scope_type == "region": + return RegionScope(scope_id) + elif scope_type == "global": + return GlobalScope() + elif scope_type == "aggregated": + return AggregatedScope() + return None + + +def construct_from_path(path, scope_id): + path_info = [item for item in path.split("/") if item] + path_count = len(path_info) + if path_count == 0: + raise exc.HTTPBadRequest(comment="Bad path %s" % path) + if path_count < 3: + return None + collection_or_type = path_info[1] + if collection_or_type in ("zones", "regions") and scope_id is None: + return None + if collection_or_type == "zones": + return ZoneScope(scope_id) + elif collection_or_type == "regions": + return RegionScope(scope_id) + elif collection_or_type == "global": + return GlobalScope() + elif collection_or_type == "aggregated": + return AggregatedScope() + raise exc.HTTPBadRequest(comment="Bad path %s" % path) diff --git a/gceapi/api/snapshot_api.py b/gceapi/api/snapshot_api.py new file mode 100644 index 0000000..9bf147c --- /dev/null +++ b/gceapi/api/snapshot_api.py @@ -0,0 +1,113 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import operation_api +from gceapi.api import operation_util +from gceapi.api import utils +from gceapi import exception + + +class API(base_api.API): + """GCE Snapshot API.""" + + KIND = "snapshot" + _status_map = { + 'creating': 'CREATING', + 'available': 'READY', + 'active': 'READY', + 'deleting': 'DELETING', + 'deleted': 'DELETING', + 'error': 'FAILED'} + + def __init__(self, *args, **kwargs): + super(API, self).__init__(*args, **kwargs) + operation_api.API().register_get_progress_method( + "snapshot-add", + self._get_add_item_progress) + operation_api.API().register_get_progress_method( + "snapshot-delete", + self._get_delete_item_progress) + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope=None): + client = clients.cinder(context) + snapshots = client.volume_snapshots.list( + search_opts={"display_name": name}) + if snapshots and len(snapshots) == 1: + return self._prepare_item(client, utils.to_dict(snapshots[0])) + raise exception.NotFound + + def get_items(self, context, scope=None): + client = clients.cinder(context) + snapshots = [utils.to_dict(item) + for item in client.volume_snapshots.list()] + for snapshot in snapshots: + self._prepare_item(client, snapshot) + return snapshots + + def delete_item(self, context, name, scope=None): + client = clients.cinder(context).volume_snapshots + snapshots = client.list(search_opts={"display_name": name}) + if not snapshots or len(snapshots) != 1: + raise exception.NotFound + operation_util.start_operation(context, + self._get_delete_item_progress, + snapshots[0].id) + client.delete(snapshots[0]) + + def add_item(self, context, body, scope=None): + name = body["name"] + disk_name = body["disk_name"] + client = clients.cinder(context) + volumes = client.volumes.list(search_opts={"display_name": disk_name}) + if not volumes or len(volumes) != 1: + raise exception.NotFound + + operation_util.start_operation(context, self._get_add_item_progress) + snapshot = client.volume_snapshots.create( + volumes[0].id, True, name, body["description"]) + operation_util.set_item_id(context, snapshot.id) + + return self._prepare_item(client, utils.to_dict(snapshot)) + + def _prepare_item(self, client, item): + item["name"] = item["display_name"] + try: + item["disk"] = utils.to_dict(client.volumes.get(item["volume_id"])) + except Exception: + pass + item["status"] = self._status_map.get(item["status"], item["status"]) + return item + + def _get_add_item_progress(self, context, snapshot_id): + client = clients.cinder(context) + try: + snapshot = client.volume_snapshots.get(snapshot_id) + except clients.cinderclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if (snapshot.status != "creating"): + return operation_api.gef_final_progress(snapshot.status == "error") + + def _get_delete_item_progress(self, context, snapshot_id): + client = clients.cinder(context) + try: + snapshot = client.volume_snapshots.get(snapshot_id) + except clients.cinderclient.exceptions.NotFound: + return operation_api.gef_final_progress() + if snapshot.status not in ["deleting", "deleted"]: + return operation_api.gef_final_progress(True) diff --git a/gceapi/api/snapshots.py b/gceapi/api/snapshots.py new file mode 100644 index 0000000..d8bb23e --- /dev/null +++ b/gceapi/api/snapshots.py @@ -0,0 +1,51 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except 
in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import common as gce_common +from gceapi.api import scopes +from gceapi.api import snapshot_api +from gceapi.api import wsgi as gce_wsgi +from gceapi import exception + + +class Controller(gce_common.Controller): + """GCE Snapshot controller""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(snapshot_api.API(), *args, **kwargs) + + def format_item(self, request, snapshot, scope): + result_dict = { + "creationTimestamp": self._format_date(snapshot["created_at"]), + "status": snapshot["status"], + "diskSizeGb": snapshot["size"], + "name": snapshot["name"], + "description": snapshot["display_description"], + } + disk = snapshot.get("disk") + if disk is not None: + result_dict["sourceDisk"] = self._qualify( + request, "disks", disk["display_name"], + scopes.ZoneScope(disk["availability_zone"])) + result_dict["sourceDiskId"] = self._get_id( + result_dict["sourceDisk"]) + + return self._format_item(request, result_dict, scope) + + def create(self, req, body, scope): + raise exception.NotFound + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/api/utils.py b/gceapi/api/utils.py new file mode 100644 index 0000000..13cb040 --- /dev/null +++ b/gceapi/api/utils.py @@ -0,0 +1,160 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
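+# apply_template() below implements GCE-style "fields" filtering. A minimal
+# sketch of its behaviour (illustrative data):
+#
+#   >>> apply_template("kind,items(name)",
+#   ...                {"kind": "compute#diskList",
+#   ...                 "items": [{"name": "disk-1", "sizeGb": 1}]})
+#   {'kind': 'compute#diskList', 'items': [{'name': 'disk-1'}]}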
+ +"""Utilities and helper functions.""" + + +def _parse_slash(string): + res = '' + sp = string.split('/') + for element in reversed(sp): + res = {element: res} + return res + + +def split_by_comma(string): + between = 0 + last_split = 0 + sp = [] + + i = 0 + while i < len(string): + if string[i] == '(': + between += 1 + elif string[i] == ')': + between -= 1 + elif string[i] == ',' and not between: + sp.append(string[last_split:i]) + last_split = i + 1 + i += 1 + sp.append(string[last_split:]) + return sp + + +def _parse_template(string): + sp = split_by_comma(string) + + i = 0 + while i < len(sp): + if '(' in sp[i]: + sp[i] = sp[i].replace('(', ' ').replace(')', ' ').split() + i += 1 + + json = {} + i = 0 + while i < len(sp): + if isinstance(sp[i], list): + fields = sp[i][1].split(',') + json[sp[i][0]] = [{}] + for field in fields: + dct = _parse_slash(field) + key = dct.keys()[0] + json[sp[i][0]][0][key] = dct[key] + else: + field = _parse_slash(sp[i]) + key = field.keys()[0] + json[key] = field[key] + i += 1 + + return json + + +def apply_template(template_string, json): + + def apply_recursive(template, json): + res = {} + if template == '': + return json + for key, val in template.items(): + if key in json and val == '': + res[key] = json[key] + elif key in json and val == '*': + pass + elif key in json and isinstance(val, list): + if not isinstance(json[key], list): + raise ValueError() + array = [] + for element in json[key]: + r = apply_recursive(val[0], element) + array.append(r) + res[key] = array + elif key in json and isinstance(val, dict): + r = apply_recursive(val, json[key]) + res[key] = r + elif key not in json and key == '*': + for k, v in json.items(): + try: + r = apply_recursive(val, v) + except ValueError: + continue + res[k] = r + elif key not in json: + raise ValueError() + return res + + return apply_recursive(_parse_template(template_string), json) + + +def to_dict(obj, recursive=False, classkey=None): + if hasattr(obj, "__dict__"): + data = dict() + for key in dir(obj): + try: + value = getattr(obj, key) + if not callable(value) and not key.startswith('_'): + data[key] = (value if not recursive + else to_dict(value, recursive, classkey)) + except AttributeError: + pass + if classkey is not None and hasattr(obj, "__class__"): + data[classkey] = obj.__class__.__name__ + return data + + if not recursive: + return obj + + if isinstance(obj, dict): + for k in obj.keys(): + obj[k] = to_dict(obj[k], recursive, classkey) + return obj + elif hasattr(obj, "__iter__"): + return [to_dict(v, recursive, classkey) for v in obj] + + return obj + + +def _extract_name_from_url(url): + """Get object name from fully qualified link.""" + return url.split('/')[-1] + + +def get_collection_name(type_name): + if type_name == "project": + return None + elif type_name.endswith("s"): + return "%ses" % type_name + else: + return "%ss" % type_name + + +def get_type_kind(type_name): + return "compute#%s" % type_name + + +def get_list_kind(type_name): + return "compute#%sList" % type_name + + +def get_aggregated_kind(type_name): + return "compute#%sAggregatedList" % type_name diff --git a/gceapi/api/wsgi.py b/gceapi/api/wsgi.py new file mode 100644 index 0000000..23e9c55 --- /dev/null +++ b/gceapi/api/wsgi.py @@ -0,0 +1,223 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob + +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import jsonutils +from gceapi.openstack.common import log as logging +from gceapi import wsgi_ext as openstack_wsgi + +LOG = logging.getLogger(__name__) + + +class JSONDictSerializer(openstack_wsgi.DictSerializer): + """JSON request body serialization.""" + + def serialize(self, data, request): + params = {'false': False, 'true': True} + pretty_print = request.params.get("prettyPrint", True) + if pretty_print in params: + pretty_print = params[pretty_print] + ident = None + if pretty_print: + ident = 4 + ret = jsonutils.dumps(data, + default=jsonutils.to_primitive, indent=ident) + return ret + + +class GCEResponse(openstack_wsgi.ResponseObject): + """GCE Response body serialization.""" + + def serialize(self, request, content_type, default_serializers=None): + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = value + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj, request) + + return response + + +class GCEFault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + def __init__(self, exception): + """ + Create a Fault for the given webob.exc.exception or gceapi.exception. + """ + self.wrapped_exc = exception + for key, value in self.wrapped_exc.headers.items(): + self.wrapped_exc.headers[key] = str(value) + + +class GCEResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. 
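+
+    Used as a context manager around controller dispatch, for example:
+
+        with GCEResourceExceptionHandler():
+            action_result = self.dispatch(method, request, action_args)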
+ """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + msg = unicode(ex_value) + raise GCEFault(webob.exc.HTTPForbidden(explanation=msg)) + elif isinstance(ex_value, exception.Invalid): + msg = unicode(ex_value) + raise GCEFault(exception.ConvertedException( + code=ex_value.code, explanation=msg)) + + # Under python 2.6, TypeError's exception value is actually a string, + # so test # here via ex_type instead: + # http://bugs.python.org/issue7853 + elif issubclass(ex_type, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_('Exception handling resource: %s') % ex_value, + exc_info=exc_info) + raise GCEFault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, GCEFault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise GCEFault(ex_value) + elif isinstance(ex_value, exception.GceapiException): + LOG.info(_("Gceapi exception thrown: %s"), unicode(ex_value)) + raise GCEFault(ex_value) + else: + msg = unicode(ex_value) + raise GCEFault(exception.ConvertedException( + code=500, title=ex_type.__name__, explanation=msg)) + + +class GCEResource(openstack_wsgi.Resource): + """Common GCE resource response formatter""" + + def __init__(self, *args, **kwargs): + super(GCEResource, self).__init__(*args, **kwargs) + self.default_serializers = dict(json=JSONDictSerializer) + + def _check_requested_project(self, project_id, context): + if (not context or project_id is None + or (project_id not in [context.project_id, context.project_name])): + msg = _("Project '%s' could not be found") % project_id \ + if project_id is not None \ + else _("Project hasn`t been provided") + + raise GCEFault(webob.exc.HTTPBadRequest( + explanation=msg)) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + method = None + try: + # Get the implementing method + try: + method = self.get_method(request, action, content_type, body) + except (AttributeError, TypeError): + msg = _("There is no such action: %s") % action + raise GCEFault(webob.exc.HTTPNotFound( + explanation=msg)) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + raise GCEFault(webob.exc.HTTPBadRequest( + explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + raise GCEFault(webob.exc.HTTPBadRequest( + explanation=msg)) + + # Now, deserialize the request body... 
+ try: + if content_type: + contents = self.deserialize(method, content_type, body) + else: + contents = {} + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + raise GCEFault(webob.exc.HTTPBadRequest( + explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + raise GCEFault(webob.exc.HTTPBadRequest( + explanation=msg)) + + # Update the action args + action_args.update(contents) + + # Check project + project_id = action_args.pop("project_id", None) + context = request.environ.get('gceapi.context') + action_result = self._check_requested_project(project_id, context) + + if action_result is None: + with GCEResourceExceptionHandler(): + action_result = self.dispatch(method, request, action_args) + + except GCEFault as ex: + action_result = ex.wrapped_exc + + response = None + resp_obj = None + if (action_result is None or type(action_result) is dict or + isinstance(action_result, Exception)): + action_result, result_code = self.controller.process_result( + request, action, action_result) + resp_obj = GCEResponse(action_result, code=result_code) + elif isinstance(action_result, GCEResponse): + resp_obj = action_result + else: + response = action_result + + # Serialize response object + if resp_obj: + if method is not None: + serializers = getattr(method, 'wsgi_serializers', {}) + else: + serializers = {} + resp_obj._bind_method_serializers(serializers) + if method is not None and hasattr(method, 'wsgi_code'): + resp_obj._default_code = method.wsgi_code + resp_obj.preserialize(accept, self.default_serializers) + response = resp_obj.serialize(request, accept, + self.default_serializers) + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError as e: + msg_dict = dict(url=request.url, e=e) + msg = _("%(url)s returned a fault: %(e)s") % msg_dict + + LOG.info(msg) + return response diff --git a/gceapi/api/zone_api.py b/gceapi/api/zone_api.py new file mode 100644 index 0000000..712da54 --- /dev/null +++ b/gceapi/api/zone_api.py @@ -0,0 +1,68 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
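+# get_items() below reduces nova availability zones to GCE zone dicts of the
+# form (illustrative values):
+#
+#   {"name": "nova", "status": "UP", "hosts": ["compute-1"]}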
+ +from gceapi.api import base_api +from gceapi.api import clients +from gceapi.api import scopes +from gceapi import exception + + +class API(base_api.API): + """GCE Zones API.""" + + KIND = "zone" + COMPUTE_SERVICE = "nova-compute" + + def _get_type(self): + return self.KIND + + def get_item(self, context, name, scope=None): + zones = self.get_items(context) + for zone in zones: + if zone["name"] == name: + return zone + raise exception.NotFound + + def get_items(self, context, scope=None): + client = clients.nova(context) + try: + nova_zones = client.availability_zones.list() + except clients.novaclient.exceptions.Forbidden as e: + try: + nova_zones = client.availability_zones.list(detailed=False) + except Exception: + raise e + + filtered_zones = list() + for zone in nova_zones: + if not zone.hosts: + filtered_zones.append(zone) + continue + for host in zone.hosts: + if self.COMPUTE_SERVICE in zone.hosts[host]: + filtered_zones.append(zone) + break + zones = list() + for zone in filtered_zones: + zones.append({ + "name": zone.zoneName, + "status": "UP" if zone.zoneState["available"] else "DOWN", + "hosts": [host for host in zone.hosts] + if zone.hosts else list() + }) + return zones + + def get_items_as_scopes(self, context): + return [scopes.ZoneScope(zone["name"]) + for zone in self.get_items(context)] diff --git a/gceapi/api/zones.py b/gceapi/api/zones.py new file mode 100644 index 0000000..5793f65 --- /dev/null +++ b/gceapi/api/zones.py @@ -0,0 +1,38 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import common as gce_common +from gceapi.api import region_api +from gceapi.api import wsgi as gce_wsgi +from gceapi.api import zone_api + + +class Controller(gce_common.Controller): + """GCE Zones controller.""" + + def __init__(self, *args, **kwargs): + super(Controller, self).__init__(zone_api.API(), *args, **kwargs) + + def format_item(self, request, zone, scope): + result_dict = { + "name": zone["name"], + "status": zone["status"], + "region": region_api.API().get_items(None)[0]["name"], + } + + return self._format_item(request, result_dict, scope) + + +def create_resource(): + return gce_wsgi.GCEResource(Controller()) diff --git a/gceapi/auth.py b/gceapi/auth.py new file mode 100644 index 0000000..b8a00b5 --- /dev/null +++ b/gceapi/auth.py @@ -0,0 +1,142 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. 
+ +""" + +from oslo.config import cfg +import webob.dec +import webob.exc + +from gceapi import context +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import jsonutils +from gceapi.openstack.common import log as logging +from gceapi import wsgi + + +auth_opts = [ + cfg.BoolOpt('api_rate_limit', + default=False, + help='whether to use per-user rate limiting for the api.'), + cfg.StrOpt('auth_strategy', + default='noauth', + help='The strategy to use for auth: noauth or keystone.'), + cfg.BoolOpt('use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. ' + 'Only enable this if you have a sanitizing proxy.'), +] + +CONF = cfg.CONF +CONF.register_opts(auth_opts) + +LOG = logging.getLogger(__name__) + + +def pipeline_factory(loader, global_conf, **local_conf): + """A paste pipeline replica that keys off of auth_strategy.""" + pipeline = local_conf[CONF.auth_strategy] + if not CONF.api_rate_limit: + limit_name = CONF.auth_strategy + '_nolimit' + pipeline = local_conf.get(limit_name, pipeline) + pipeline = pipeline.split() + filters = [loader.get_filter(n) for n in pipeline[:-1]] + app = loader.get_app(pipeline[-1]) + filters.reverse() + for filter_func in filters: + app = filter_func(app) + return app + + +class InjectContext(wsgi.Middleware): + """Add a 'gceapi.context' to WSGI environ.""" + + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + req.environ['gceapi.context'] = self.context + return self.application + + +class GceapiKeystoneContext(wsgi.Middleware): + """Make a request context from keystone headers.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + user_id = req.headers.get('X_USER') + user_id = req.headers.get('X_USER_ID', user_id) + if user_id is None: + LOG.debug("Neither X_USER_ID nor X_USER found in request") + return webob.exc.HTTPUnauthorized() + + roles = self._get_roles(req) + + if 'X_TENANT_ID' in req.headers: + # This is the new header since Keystone went to ID/Name + project_id = req.headers['X_TENANT_ID'] + else: + # This is for legacy compatibility + project_id = req.headers['X_TENANT'] + project_name = req.headers.get('X_TENANT_NAME') + user_name = req.headers.get('X_USER_NAME') + + # Get the auth token + auth_token = req.headers.get('X_AUTH_TOKEN', + req.headers.get('X_STORAGE_TOKEN')) + + # Build a context, including the auth_token... 
+ remote_address = req.remote_addr + if CONF.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + + service_catalog = None + if req.headers.get('X_SERVICE_CATALOG') is not None: + try: + catalog_header = req.headers.get('X_SERVICE_CATALOG') + service_catalog = jsonutils.loads(catalog_header) + except ValueError: + raise webob.exc.HTTPInternalServerError( + _('Invalid service catalog json.')) + + ctx = context.RequestContext(user_id, + project_id, + user_name=user_name, + project_name=project_name, + roles=roles, + auth_token=auth_token, + remote_address=remote_address, + service_catalog=service_catalog) + + req.environ['gceapi.context'] = ctx + return self.application + + def _get_roles(self, req): + """Get the list of roles.""" + + if 'X_ROLES' in req.headers: + roles = req.headers.get('X_ROLES', '') + else: + # Fallback to deprecated role header: + roles = req.headers.get('X_ROLE', '') + if roles: + LOG.warn(_("Sourcing roles from deprecated X-Role HTTP " + "header")) + return [r.strip() for r in roles.split(',')] diff --git a/gceapi/cmd/__init__.py b/gceapi/cmd/__init__.py new file mode 100644 index 0000000..1e8cb71 --- /dev/null +++ b/gceapi/cmd/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.openstack.common import gettextutils +gettextutils.install('gceapi') diff --git a/gceapi/cmd/api.py b/gceapi/cmd/api.py new file mode 100755 index 0000000..6fb019a --- /dev/null +++ b/gceapi/cmd/api.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
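
As an illustration of GceapiKeystoneContext above, a hedged sketch of the headers it consumes; the values here are invented, and in a real deployment keystone's auth_token filter sets them after validating the token:

# Hypothetical header set left behind by the auth_token middleware:
headers = {
    'X_USER_ID': 'u-123',
    'X_TENANT_ID': 'p-456',
    'X_USER_NAME': 'alice',
    'X_TENANT_NAME': 'demo',
    'X_ROLES': 'admin, member',
    'X_AUTH_TOKEN': 'token-abc',
}
# The middleware would build a RequestContext with user_id='u-123',
# project_id='p-456', roles=['admin', 'member'], and store it in
# req.environ['gceapi.context'] for the handlers downstream.
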
+ +""" +Gceapi API Server +""" + +import eventlet +import sys + +eventlet.patcher.monkey_patch(os=False) + +from oslo.config import cfg + +from gceapi import config +from gceapi.openstack.common import log as logging +from gceapi import service + +CONF = cfg.CONF +CONF.import_opt('use_ssl', 'gceapi.service') + + +def main(): + config.parse_args(sys.argv) + logging.setup('gceapi') + + server = service.WSGIService( + 'gce', use_ssl=CONF.use_ssl, max_url_len=16384) + service.serve(server) + service.wait() + + +if __name__ == '__main__': + main() diff --git a/gceapi/cmd/manage.py b/gceapi/cmd/manage.py new file mode 100644 index 0000000..927ba0c --- /dev/null +++ b/gceapi/cmd/manage.py @@ -0,0 +1,75 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" + CLI interface for GCE API management. +""" + +import sys + +from oslo.config import cfg + +from gceapi.db import migration +from gceapi.openstack.common import log +from gceapi import version + + +CONF = cfg.CONF + + +def do_db_version(): + """Print database's current migration level.""" + print(migration.db_version()) + + +def do_db_sync(): + """ + Place a database under migration control and upgrade, + creating first if necessary. + """ + migration.db_sync(CONF.command.version) + + +def add_command_parsers(subparsers): + parser = subparsers.add_parser('db_version') + parser.set_defaults(func=do_db_version) + + parser = subparsers.add_parser('db_sync') + parser.set_defaults(func=do_db_sync) + parser.add_argument('version', nargs='?') + parser.add_argument('current_version', nargs='?') + + +command_opt = cfg.SubCommandOpt('command', + title='Commands', + help='Available commands', + handler=add_command_parsers) + + +def main(): + CONF.register_cli_opt(command_opt) + try: + default_config_files = cfg.find_config_files('gceapi', 'gceapi-engine') + CONF(sys.argv[1:], project='gceapi', prog='gceapi-manage', + version=version.version_info.version_string(), + default_config_files=default_config_files) + log.setup("gceapi") + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + try: + CONF.command.func() + except Exception as e: + sys.exit("ERROR: %s" % e) diff --git a/gceapi/config.py b/gceapi/config.py new file mode 100644 index 0000000..e28dec7 --- /dev/null +++ b/gceapi/config.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +from gceapi.openstack.common.db.sqlalchemy import session as db_session +from gceapi import paths +from gceapi import version + +_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db') + + +def parse_args(argv, default_config_files=None): + db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, + sqlite_db='gceapi.sqlite') + cfg.CONF(argv[1:], + project='gceapi', + version=version.version_string(), + default_config_files=default_config_files) diff --git a/gceapi/context.py b/gceapi/context.py new file mode 100644 index 0000000..2148f19 --- /dev/null +++ b/gceapi/context.py @@ -0,0 +1,162 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""RequestContext: context for requests that persist through all of gceapi.""" + +import uuid + +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import local +from gceapi.openstack.common import log as logging +from gceapi.openstack.common import timeutils + + +LOG = logging.getLogger(__name__) + + +def generate_request_id(): + return 'req-' + str(uuid.uuid4()) + + +class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + + def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None, overwrite=True, + user_name=None, project_name=None, + service_catalog=None, **kwargs): + """ + :param read_deleted: 'no' indicates deleted records are hidden, 'yes' + indicates deleted records are visible, 'only' indicates that + *only* deleted records are visible. + + :param overwrite: Set to False to ensure that the greenthread local + copy of the index is not overwritten. + + :param kwargs: Extra arguments that might be present, but we ignore + because they possibly came in from older rpc messages. 
+ """ + if kwargs: + LOG.warn(_('Arguments dropped when creating context: %s') % + str(kwargs)) + + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = timeutils.utcnow() + if isinstance(timestamp, basestring): + timestamp = timeutils.parse_strtime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = generate_request_id() + self.request_id = request_id + self.auth_token = auth_token + + self.service_catalog = service_catalog + + self.user_name = user_name + self.project_name = project_name + self.is_admin = is_admin + #if self.is_admin is None: + # self.is_admin = policy.check_is_admin(self) + if overwrite or not hasattr(local.store, 'context'): + self.update_store() + self.operation = None + self.operation_start_time = None + self.operation_get_progress_method = None + self.operation_item_id = None + + def _get_read_deleted(self): + return self._read_deleted + + def _set_read_deleted(self, read_deleted): + if read_deleted not in ('no', 'yes', 'only'): + raise ValueError(_("read_deleted can only be one of 'no', " + "'yes' or 'only', not %r") % read_deleted) + self._read_deleted = read_deleted + + def _del_read_deleted(self): + del self._read_deleted + + read_deleted = property(_get_read_deleted, _set_read_deleted, + _del_read_deleted) + + def update_store(self): + local.store.context = self + + def to_dict(self): + return {'user_id': self.user_id, + 'project_id': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'roles': self.roles, + 'remote_address': self.remote_address, + 'timestamp': timeutils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token, + 'user_name': self.user_name, + 'service_catalog': self.service_catalog, + 'project_name': self.project_name, + 'tenant': self.tenant, + 'user': self.user} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + # NOTE(sirp): the openstack/common version of RequestContext uses + # tenant/user whereas the gceapi version uses project_id/user_id. We need + # this shim in order to use context-aware code from openstack/common, like + # logging, until we make the switch to using openstack/common's version of + # RequestContext. + @property + def tenant(self): + return self.project_id + + @property + def user(self): + return self.user_id + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def require_context(ctxt): + """Raise exception.NotAuthorized() if context is not a user or an + admin context. + """ + if not ctxt.is_admin and not is_user_context(ctxt): + raise exception.NotAuthorized() diff --git a/gceapi/db/__init__.py b/gceapi/db/__init__.py new file mode 100644 index 0000000..7874af0 --- /dev/null +++ b/gceapi/db/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain + # a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + # License for the specific language governing permissions and limitations + # under the License. + + """ + DB abstraction for Gceapi + """ + + from gceapi.db.api import * diff --git a/gceapi/db/api.py b/gceapi/db/api.py new file mode 100644 index 0000000..9e35d3a --- /dev/null +++ b/gceapi/db/api.py @@ -0,0 +1,58 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Defines interface for DB access. + +Functions in this module are imported into the gceapi.db namespace. Call these +functions from gceapi.db namespace, not the gceapi.db.api namespace. + +**Related Flags** + +:db_backend: string to lookup in the list of LazyPluggable backends. + `sqlalchemy` is the only supported backend right now. + +:connection: string specifying the sqlalchemy connection to use, like: + `sqlite:///var/lib/gceapi/gceapi.sqlite`. + +""" + +from gceapi.openstack.common.db import api as db_api + + +_BACKEND_MAPPING = {'sqlalchemy': 'gceapi.db.sqlalchemy.api'} +IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING) + + +def add_item(context, kind, data): + IMPL.add_item(context, kind, data) + + +def delete_item(context, kind, item_id): + IMPL.delete_item(context, kind, item_id) + + +def update_item(context, kind, item): + IMPL.update_item(context, kind, item) + + +def get_items(context, kind): + return IMPL.get_items(context, kind) + + +def get_item_by_id(context, kind, item_id): + return IMPL.get_item_by_id(context, kind, item_id) + + +def get_item_by_name(context, kind, name): + return IMPL.get_item_by_name(context, kind, name) diff --git a/gceapi/db/migration.py b/gceapi/db/migration.py new file mode 100644 index 0000000..d999446 --- /dev/null +++ b/gceapi/db/migration.py @@ -0,0 +1,73 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
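
A hedged usage sketch of the gceapi.db facade defined above; the 'network' kind and the field names are purely illustrative, and callers are expected to pass a gceapi RequestContext:

from gceapi import db

def remember_network(context, network_id, name, cidr):
    # Items are keyed by (kind, id) per project; fields beyond 'id'
    # and 'name' travel in the packed 'data' column of the backend.
    db.add_item(context, 'network',
                {'id': network_id, 'name': name, 'cidr': cidr})
    return db.get_item_by_name(context, 'network', name)
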
+ +"""Database setup and migration commands.""" + +from oslo.config import cfg + +from gceapi import exception +from gceapi.openstack.common.gettextutils import _ + +CONF = cfg.CONF + + +class LazyPluggable(object): + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, config_group=None, **backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + self.__config_group = config_group + + def __get_backend(self): + if not self.__backend: + if self.__config_group is None: + backend_name = CONF[self.__pivot] + else: + backend_name = CONF[self.__config_group][self.__pivot] + if backend_name not in self.__backends: + msg = _('Invalid backend: %s') % backend_name + raise exception.GceapiException(msg) + + backend = self.__backends[backend_name] + if isinstance(backend, tuple): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + return self.__backend + + def __getattr__(self, key): + backend = self.__get_backend() + return getattr(backend, key) + +IMPL = LazyPluggable('backend', + config_group='database', + sqlalchemy='gceapi.db.sqlalchemy.migration') + +INIT_VERSION = 0 + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(INIT_VERSION, version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version(INIT_VERSION) diff --git a/gceapi/db/sqlalchemy/__init__.py b/gceapi/db/sqlalchemy/__init__.py new file mode 100644 index 0000000..97ca94f --- /dev/null +++ b/gceapi/db/sqlalchemy/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/gceapi/db/sqlalchemy/api.py b/gceapi/db/sqlalchemy/api.py new file mode 100644 index 0000000..ad42615 --- /dev/null +++ b/gceapi/db/sqlalchemy/api.py @@ -0,0 +1,133 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Implementation of SQLAlchemy backend.""" + +import ast +import functools +import sys + +from oslo.config import cfg + +import gceapi.context +from gceapi.db.sqlalchemy import models +from gceapi.openstack.common.db.sqlalchemy import session as db_session + +CONF = cfg.CONF +CONF.import_opt('connection', + 'gceapi.openstack.common.db.sqlalchemy.session', + group='database') + +get_session = db_session.get_session + + +def get_backend(): + """The backend is this module itself.""" + return sys.modules[__name__] + + +def require_context(f): + """Decorator to require *any* user or admin context. + + The first argument to the wrapped function must be the context. + """ + + @functools.wraps(f) + def wrapper(*args, **kwargs): + gceapi.context.require_context(args[0]) + return f(*args, **kwargs) + return wrapper + + +def model_query(context, model, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + """ + session = kwargs.get('session') or get_session() + + return session.query(model, *args).\ + filter_by(project_id=context.project_id) + + +@require_context +def add_item(context, kind, data): + item_ref = models.Item() + item_ref.update({ + "project_id": context.project_id, + "kind": kind, + }) + item_ref.update(_pack_item_data(data)) + item_ref.save() + + +@require_context +def delete_item(context, kind, item_id): + model_query(context, models.Item).\ + filter_by(kind=kind, + id=item_id).\ + delete() + + +@require_context +def update_item(context, kind, item): + item_ref = model_query(context, models.Item).\ + filter_by(kind=kind, + id=item["id"]).\ + one() + item_ref.update(_pack_item_data(item)) + item_ref.save() + + +@require_context +def get_items(context, kind): + return [_unpack_item_data(item) + for item in model_query(context, models.Item). + filter_by(kind=kind). + all()] + + +@require_context +def get_item_by_id(context, kind, item_id): + return _unpack_item_data(model_query(context, models.Item). + filter_by(kind=kind, + id=item_id). + first()) + + +@require_context +def get_item_by_name(context, kind, name): + return _unpack_item_data(model_query(context, models.Item). + filter_by(kind=kind, + name=name). + first()) + + +def _pack_item_data(item_data): + return { + "id": item_data.pop("id"), + "name": item_data.pop("name", None), + "data": str(item_data), + } + + +def _unpack_item_data(item_ref): + if item_ref is None: + return None + data = ast.literal_eval(item_ref.data) + data["id"] = item_ref.id + if item_ref.name is not None: + data["name"] = item_ref.name + return data diff --git a/gceapi/db/sqlalchemy/migrate_repo/README b/gceapi/db/sqlalchemy/migrate_repo/README new file mode 100644 index 0000000..6218f8c --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/gceapi/db/sqlalchemy/migrate_repo/__init__.py b/gceapi/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 0000000..97ca94f --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/gceapi/db/sqlalchemy/migrate_repo/manage.py b/gceapi/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 0000000..fdca255 --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,19 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from migrate.versioning.shell import main + + +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/gceapi/db/sqlalchemy/migrate_repo/migrate.cfg b/gceapi/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 0000000..f47114a --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=gceapi + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/gceapi/db/sqlalchemy/migrate_repo/versions/001_icehouse.py b/gceapi/db/sqlalchemy/migrate_repo/versions/001_icehouse.py new file mode 100644 index 0000000..2a5861d --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/versions/001_icehouse.py @@ -0,0 +1,38 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
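
Migrations in this repository are normally run through gceapi-manage db_sync (defined earlier in gceapi/cmd/manage.py); the same can be driven programmatically. A short sketch, assuming the gceapi configuration has already been parsed:

from gceapi.db import migration

migration.db_sync()            # upgrade the schema to the latest version
print(migration.db_version())  # report the resulting version
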
+ +from sqlalchemy import Column, Index, MetaData, PrimaryKeyConstraint +from sqlalchemy import String, Table, Text + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + items = Table('items', meta, + Column("id", String(length=255)), + Column("project_id", String(length=255)), + Column("kind", String(length=50)), + Column("name", String(length=63)), + Column("data", Text()), + PrimaryKeyConstraint('kind', 'id'), + Index('items_project_kind_name_idx', 'project_id', 'kind', 'name'), + mysql_engine="InnoDB", + mysql_charset="utf8" + ) + items.create() + + +def downgrade(migrate_engine): + raise NotImplementedError("Downgrade from Icehouse is unsupported.") diff --git a/gceapi/db/sqlalchemy/migrate_repo/versions/__init__.py b/gceapi/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 0000000..97ca94f --- /dev/null +++ b/gceapi/db/sqlalchemy/migrate_repo/versions/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/gceapi/db/sqlalchemy/migration.py b/gceapi/db/sqlalchemy/migration.py new file mode 100644 index 0000000..2a252b5 --- /dev/null +++ b/gceapi/db/sqlalchemy/migration.py @@ -0,0 +1,30 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from gceapi.openstack.common.db.sqlalchemy import migration + + +def db_sync(init_version, version=None): + return migration.db_sync(_get_repo_path(), version, init_version) + + +def db_version(init_version): + return migration.db_version(_get_repo_path(), init_version) + + +def _get_repo_path(): + return os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') diff --git a/gceapi/db/sqlalchemy/models.py b/gceapi/db/sqlalchemy/models.py new file mode 100644 index 0000000..f6be09f --- /dev/null +++ b/gceapi/db/sqlalchemy/models.py @@ -0,0 +1,37 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
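
For reference, given _pack_item_data() in gceapi/db/sqlalchemy/api.py above, a row in the items table created by this migration would look roughly as follows (values invented for illustration):

item = {"id": "net-1", "name": "default", "cidr": "10.240.0.0/16"}
# _pack_item_data() splits 'id' and 'name' into their own columns and
# stringifies the remainder, so the stored row is approximately:
#   id='net-1', name='default', data="{'cidr': '10.240.0.0/16'}"
# _unpack_item_data() later rebuilds the dict with ast.literal_eval().
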
+ +""" +SQLAlchemy models for gceapi data. +""" + +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import Column, Index, PrimaryKeyConstraint, String, Text + +from gceapi.openstack.common.db.sqlalchemy import models + +BASE = declarative_base() + + +class Item(BASE, models.ModelBase): + __tablename__ = 'items' + __table_args__ = ( + PrimaryKeyConstraint('kind', 'id'), + Index('items_project_kind_name_idx', 'project_id', 'kind', 'name'), + ) + id = Column(String(length=255)) + project_id = Column(String(length=255)) + kind = Column(String(length=50)) + name = Column(String(length=63)) + data = Column(Text()) diff --git a/gceapi/exception.py b/gceapi/exception.py new file mode 100644 index 0000000..5439c7a --- /dev/null +++ b/gceapi/exception.py @@ -0,0 +1,1423 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Gceapi base exception handling. + +Includes decorator for re-raising gceapi-type exceptions. + +SHOULD include dedicated exception logging. + +""" + +import sys + +from oslo.config import cfg +import webob.exc + +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +LOG = logging.getLogger(__name__) + +exc_log_opts = [ + cfg.BoolOpt('fatal_exception_format_errors', + default=False, + help='make exception message format errors fatal'), +] + +CONF = cfg.CONF +CONF.register_opts(exc_log_opts) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +class GceapiException(Exception): + """Base Gceapi Exception + + To correctly use this class, inherit from it and define + a 'msg_fmt' property. That msg_fmt will get printf'd + with the keyword arguments provided to the constructor. 
+ + """ + msg_fmt = _("An unknown exception occurred.") + code = 500 + headers = {} + safe = False + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.msg_fmt % kwargs + + except Exception: + exc_info = sys.exc_info() + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + + if CONF.fatal_exception_format_errors: + raise exc_info[0], exc_info[1], exc_info[2] + else: + # at least get the core message out if something happened + message = self.msg_fmt + + super(GceapiException, self).__init__(message) + + def format_message(self): + # NOTE(mrodden): use the first argument to the python Exception object + # which should be our full GceapiException message, (see __init__) + return self.args[0] + + +class EncryptionFailure(GceapiException): + msg_fmt = _("Failed to encrypt text: %(reason)s") + + +class DecryptionFailure(GceapiException): + msg_fmt = _("Failed to decrypt text: %(reason)s") + + +class VirtualInterfaceCreateException(GceapiException): + msg_fmt = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(GceapiException): + msg_fmt = _("5 attempts to create virtual interface" + "with unique mac address failed") + + +class GlanceConnectionFailed(GceapiException): + msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: " + "%(reason)s") + + +class NotAuthorized(GceapiException): + ec2_code = 'AuthFailure' + msg_fmt = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + msg_fmt = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + msg_fmt = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotActive(GceapiException): + # NOTE(jruzicka): IncorrectState is used for volumes only in EC2, + # but it still seems like the most appropriate option. 
+ ec2_code = 'IncorrectState' + msg_fmt = _("Image %(image_id)s is not active.") + + +class ImageNotAuthorized(GceapiException): + msg_fmt = _("Not authorized for image %(image_id)s.") + + +class Invalid(GceapiException): + msg_fmt = _("Unacceptable parameters.") + code = 400 + + +class InvalidBDM(Invalid): + msg_fmt = _("Block Device Mapping is Invalid.") + + +class InvalidBDMSnapshot(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get snapshot %(id)s.") + + +class InvalidBDMVolume(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get volume %(id)s.") + + +class InvalidBDMImage(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "failed to get image %(id)s.") + + +class InvalidBDMBootSequence(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "Boot sequence for the instance " + "and image/block device mapping " + "combination is not valid.") + + +class InvalidBDMLocalsLimit(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "You specified more local devices than the " + "limit allows") + + +class InvalidBDMEphemeralSize(InvalidBDM): + msg_fmt = _("Ephemeral disks requested are larger than " + "the instance type allows.") + + +class InvalidBDMSwapSize(InvalidBDM): + msg_fmt = _("Swap drive requested is larger than instance type allows.") + + +class InvalidBDMFormat(InvalidBDM): + msg_fmt = _("Block Device Mapping is Invalid: " + "%(details)s") + + +class InvalidBDMForLegacy(InvalidBDM): + msg_fmt = _("Block Device Mapping cannot " + "be converted to legacy format. ") + + +class InvalidAttribute(Invalid): + msg_fmt = _("Attribute not supported: %(attr)s") + + +class VolumeUnattached(Invalid): + ec2_code = 'IncorrectState' + msg_fmt = _("Volume %(volume_id)s is not attached to anything") + + +class VolumeNotCreated(GceapiException): + msg_fmt = _("Volume %(volume_id)s did not finish being created" + " even after we waited %(seconds)s seconds or %(attempts)s" + " attempts.") + + +class InvalidKeypair(Invalid): + ec2_code = 'InvalidKeyPair.Format' + msg_fmt = _("Keypair data is invalid") + ": %(reason)s" + + +class InvalidRequest(Invalid): + msg_fmt = _("The request is invalid.") + + +class InvalidInput(Invalid): + msg_fmt = _("Invalid input received") + ": %(reason)s" + + +class InvalidVolume(Invalid): + ec2_code = 'UnsupportedOperation' + msg_fmt = _("Invalid volume") + ": %(reason)s" + + +class InvalidMetadata(Invalid): + msg_fmt = _("Invalid metadata") + ": %(reason)s" + + +class InvalidMetadataSize(Invalid): + msg_fmt = _("Invalid metadata size") + ": %(reason)s" + + +class InvalidPortRange(Invalid): + ec2_code = 'InvalidParameterValue' + msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") + + +class InvalidIpProtocol(Invalid): + msg_fmt = _("Invalid IP protocol %(protocol)s.") + + +class InvalidContentType(Invalid): + msg_fmt = _("Invalid content type %(content_type)s.") + + +class InvalidCidr(Invalid): + msg_fmt = _("Invalid cidr %(cidr)s.") + + +class InvalidUnicodeParameter(Invalid): + msg_fmt = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. +class InvalidParameterValue(Invalid): + ec2_code = 'InvalidParameterValue' + msg_fmt = _("%(err)s") + + +class InvalidAggregateAction(Invalid): + msg_fmt = _("Cannot perform action '%(action)s' on aggregate " + "%(aggregate_id)s. 
Reason: %(reason)s.") + + +class InvalidGroup(Invalid): + msg_fmt = _("Group not valid. Reason: %(reason)s") + + +class InvalidSortKey(Invalid): + msg_fmt = _("Sort key supplied was not valid.") + + +class InstanceInvalidState(Invalid): + msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " + "%(method)s while the instance is in this state.") + + +class InstanceNotRunning(Invalid): + msg_fmt = _("Instance %(instance_id)s is not running.") + + +class InstanceNotInRescueMode(Invalid): + msg_fmt = _("Instance %(instance_id)s is not in rescue mode") + + +class InstanceNotRescuable(Invalid): + msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s") + + +class InstanceNotReady(Invalid): + msg_fmt = _("Instance %(instance_id)s is not ready") + + +class InstanceSuspendFailure(Invalid): + msg_fmt = _("Failed to suspend instance") + ": %(reason)s" + + +class InstanceResumeFailure(Invalid): + msg_fmt = _("Failed to resume instance: %(reason)s.") + + +class InstancePowerOnFailure(Invalid): + msg_fmt = _("Failed to power on instance: %(reason)s.") + + +class InstancePowerOffFailure(Invalid): + msg_fmt = _("Failed to power off instance: %(reason)s.") + + +class InstanceRebootFailure(Invalid): + msg_fmt = _("Failed to reboot instance") + ": %(reason)s" + + +class InstanceTerminationFailure(Invalid): + msg_fmt = _("Failed to terminate instance") + ": %(reason)s" + + +class InstanceDeployFailure(Invalid): + msg_fmt = _("Failed to deploy instance") + ": %(reason)s" + + +class MultiplePortsNotApplicable(Invalid): + msg_fmt = _("Failed to launch instances") + ": %(reason)s" + + +class ServiceUnavailable(Invalid): + msg_fmt = _("Service is unavailable at this time.") + + +class ComputeResourcesUnavailable(ServiceUnavailable): + msg_fmt = _("Insufficient compute resources.") + + +class ComputeServiceUnavailable(ServiceUnavailable): + msg_fmt = _("Compute service of %(host)s is unavailable at this time.") + + +class UnableToMigrateToSelf(Invalid): + msg_fmt = _("Unable to migrate instance (%(instance_id)s) " + "to current host (%(host)s).") + + +class InvalidHypervisorType(Invalid): + msg_fmt = _("The supplied hypervisor type is invalid.") + + +class DestinationHypervisorTooOld(Invalid): + msg_fmt = _("The instance requires a newer hypervisor version than " + "has been provided.") + + +class DestinationDiskExists(Invalid): + msg_fmt = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + +class InvalidDevicePath(Invalid): + msg_fmt = _("The supplied device path (%(path)s) is invalid.") + + +class DevicePathInUse(Invalid): + msg_fmt = _("The supplied device path (%(path)s) is in use.") + code = 409 + + +class DeviceIsBusy(Invalid): + msg_fmt = _("The supplied device (%(device)s) is busy.") + + +class InvalidCPUInfo(Invalid): + msg_fmt = _("Unacceptable CPU info") + ": %(reason)s" + + +class InvalidIpAddressError(Invalid): + msg_fmt = _("%(address)s is not a valid IP v4/6 address.") + + +class InvalidVLANTag(Invalid): + msg_fmt = _("VLAN tag is not appropriate for the port group " + "%(bridge)s. Expected VLAN tag is %(tag)s, " + "but the one associated with the port group is %(pgroup)s.") + + +class InvalidVLANPortGroup(Invalid): + msg_fmt = _("vSwitch which contains the port group %(bridge)s is " + "not associated with the desired physical adapter.
" + "Expected vSwitch is %(expected)s, but the one associated " + "is %(actual)s.") + + +class InvalidDiskFormat(Invalid): + msg_fmt = _("Disk format %(disk_format)s is not acceptable") + + +class ImageUnacceptable(Invalid): + msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s") + + +class InstanceUnacceptable(Invalid): + msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s") + + +class InvalidEc2Id(Invalid): + msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.") + + +class InvalidUUID(Invalid): + msg_fmt = _("Expected a uuid but received %(uuid)s.") + + +class InvalidID(Invalid): + msg_fmt = _("Invalid ID received %(id)s.") + + +class ConstraintNotMet(GceapiException): + msg_fmt = _("Constraint not met.") + code = 412 + + +class NotFound(GceapiException): + msg_fmt = _("Resource could not be found.") + code = 404 + + +class AgentBuildNotFound(NotFound): + msg_fmt = _("No agent-build associated with id %(id)s.") + + +class AgentBuildExists(GceapiException): + msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s " + "architecture %(architecture)s exists.") + + +class VolumeNotFound(NotFound): + ec2_code = 'InvalidVolumeID.NotFound' + msg_fmt = _("Volume %(volume_id)s could not be found.") + + +class SnapshotNotFound(NotFound): + ec2_code = 'InvalidSnapshotID.NotFound' + msg_fmt = _("Snapshot %(snapshot_id)s could not be found.") + + +class DiskNotFound(NotFound): + msg_fmt = _("No disk at %(location)s") + + +class VolumeDriverNotFound(NotFound): + msg_fmt = _("Could not find a handler for %(driver_type)s volume.") + + +class InvalidImageRef(Invalid): + msg_fmt = _("Invalid image href %(image_href)s.") + + +class AutoDiskConfigDisabledByImage(Invalid): + msg_fmt = _("Requested image %(image)s " + "has automatic disk resize disabled.") + + +class ImageNotFound(NotFound): + msg_fmt = _("Image %(image_id)s could not be found.") + + +# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code. +class ImageNotFoundEC2(ImageNotFound): + msg_fmt = _("Image %(image_id)s could not be found. The Gceapi EC2 API " + "assigns image ids dynamically when they are listed for the " + "first time. 
Have you listed image ids since adding this " + "image?") + + +class ProjectNotFound(NotFound): + msg_fmt = _("Project %(project_id)s could not be found.") + + +class StorageRepositoryNotFound(NotFound): + msg_fmt = _("Cannot find SR to read/write VDI.") + + +class NetworkDuplicated(Invalid): + msg_fmt = _("Network %(network_id)s is duplicated.") + + +class NetworkInUse(GceapiException): + msg_fmt = _("Network %(network_id)s is still in use.") + + +class NetworkNotCreated(GceapiException): + msg_fmt = _("%(req)s is required to create a network.") + + +class NetworkNotFound(NotFound): + msg_fmt = _("Network %(network_id)s could not be found.") + + +class PortNotFound(NotFound): + msg_fmt = _("Port id %(port_id)s could not be found.") + + +class NetworkNotFoundForBridge(NetworkNotFound): + msg_fmt = _("Network could not be found for bridge %(bridge)s") + + +class NetworkNotFoundForUUID(NetworkNotFound): + msg_fmt = _("Network could not be found for uuid %(uuid)s") + + +class NetworkNotFoundForCidr(NetworkNotFound): + msg_fmt = _("Network could not be found with cidr %(cidr)s.") + + +class NetworkNotFoundForInstance(NetworkNotFound): + msg_fmt = _("Network could not be found for instance %(instance_id)s.") + + +class NoNetworksFound(NotFound): + msg_fmt = _("No networks defined.") + + +class NetworkNotFoundForProject(NotFound): + msg_fmt = _("Either Network uuid %(network_uuid)s is not present or " + "is not assigned to the project %(project_id)s.") + + +class NetworkAmbiguous(Invalid): + msg_fmt = _("More than one possible network found. Specify " + "network ID(s) to select which one(s) to connect to.") + + +class DatastoreNotFound(NotFound): + msg_fmt = _("Could not find the datastore reference(s) which the VM uses.") + + +class PortInUse(Invalid): + msg_fmt = _("Port %(port_id)s is still in use.") + + +class PortNotUsable(Invalid): + msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.") + + +class PortNotFree(Invalid): + msg_fmt = _("No free port available for instance %(instance)s.") + + +class FixedIpExists(GceapiException): + msg_fmt = _("Fixed ip %(address)s already exists.") + + +class FixedIpNotFound(NotFound): + msg_fmt = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + msg_fmt = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): + msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.") + + +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + msg_fmt = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") + + +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") + + +class FixedIpNotFoundForNetwork(FixedIpNotFound): + msg_fmt = _("Fixed IP address (%(address)s) does not exist in " + "network (%(network_uuid)s).") + + +class FixedIpAlreadyInUse(GceapiException): + msg_fmt = _("Fixed IP address %(address)s is already in use on instance " + "%(instance_uuid)s.") + + +class FixedIpAssociatedWithMultipleInstances(GceapiException): + msg_fmt = _("More than one instance is associated with fixed ip address " + "'%(address)s'.") + + +class FixedIpInvalid(Invalid): + msg_fmt = _("Fixed IP address %(address)s is invalid.") + + +class NoMoreFixedIps(GceapiException): + ec2_code = 'UnsupportedOperation' + msg_fmt = _("Zero fixed ips available.") + + +class NoFixedIpsDefined(NotFound): + msg_fmt = _("Zero fixed ips could be found.") + + +class FloatingIpExists(GceapiException): + msg_fmt = _("Floating ip %(address)s already exists.") + + +class FloatingIpNotFound(NotFound): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Floating ip not found for id %(id)s.") + + +class FloatingIpDNSExists(Invalid): + msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.") + + +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + msg_fmt = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForHost(FloatingIpNotFound): + msg_fmt = _("Floating ip not found for host %(host)s.") + + +class FloatingIpMultipleFoundForAddress(GceapiException): + msg_fmt = _("Multiple floating ips are found for address %(address)s.") + + +class FloatingIpPoolNotFound(NotFound): + msg_fmt = _("Floating ip pool not found.") + safe = True + + +class NoMoreFloatingIps(FloatingIpNotFound): + msg_fmt = _("Zero floating ips available.") + safe = True + + +class FloatingIpAssociated(GceapiException): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Floating ip %(address)s is associated.") + + +class FloatingIpNotAssociated(GceapiException): + msg_fmt = _("Floating ip %(address)s is not associated.") + + +class NoFloatingIpsDefined(NotFound): + msg_fmt = _("Zero floating ips exist.") + + +class NoFloatingIpInterface(NotFound): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Interface %(interface)s not found.") + + +class CannotDisassociateAutoAssignedFloatingIP(GceapiException): + ec2_code = "UnsupportedOperation" + msg_fmt = _("Cannot disassociate auto assigned floating ip") + + +class KeypairNotFound(NotFound): + ec2_code = 'InvalidKeyPair.NotFound' + msg_fmt = _("Keypair %(name)s not found for user %(user_id)s") + + +class ServiceNotFound(NotFound): + msg_fmt = _("Service %(service_id)s could not be found.") + + +class ServiceBinaryExists(GceapiException): + msg_fmt = _("Service with host %(host)s binary %(binary)s exists.") + + +class ServiceTopicExists(GceapiException): + msg_fmt = _("Service with host %(host)s topic %(topic)s exists.") + + +class HostNotFound(NotFound): + msg_fmt = _("Host %(host)s could not be found.") + + +class ComputeHostNotFound(HostNotFound): + msg_fmt = _("Compute host %(host)s could not be found.") + + +class HostBinaryNotFound(NotFound): + msg_fmt = _("Could not find binary %(binary)s on host %(host)s.") + + +class InvalidReservationExpiration(Invalid): + msg_fmt = _("Invalid reservation expiration %(expire)s.") + + +class InvalidQuotaValue(Invalid): + msg_fmt = _("Change would make usage less than 0 for the following " + "resources: %(unders)s") + + +class QuotaNotFound(NotFound): + msg_fmt = _("Quota could not be found") + + +class QuotaExists(GceapiException): + msg_fmt = _("Quota exists for project %(project_id)s, " + "resource %(resource)s") + + +class QuotaResourceUnknown(QuotaNotFound): + msg_fmt = _("Unknown quota resources %(unknown)s.") + + +class ProjectUserQuotaNotFound(QuotaNotFound): + msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s " + "could not be found.") + + +class ProjectQuotaNotFound(QuotaNotFound): + msg_fmt = _("Quota for project %(project_id)s could not be found.") + + +class QuotaClassNotFound(QuotaNotFound): + msg_fmt = _("Quota class %(class_name)s could not be found.") + + +class QuotaUsageNotFound(QuotaNotFound): + msg_fmt = _("Quota usage for project %(project_id)s could not be found.") + + +class ReservationNotFound(QuotaNotFound): + msg_fmt = _("Quota reservation %(uuid)s could not be found.") + + +class OverQuota(GceapiException): +
msg_fmt = _("Quota exceeded for resources: %(overs)s") + + +class SecurityGroupNotFound(NotFound): + msg_fmt = _("Security group %(security_group_id)s not found.") + + +class SecurityGroupNotFoundForProject(SecurityGroupNotFound): + msg_fmt = _("Security group %(security_group_id)s not found " + "for project %(project_id)s.") + + +class SecurityGroupNotFoundForRule(SecurityGroupNotFound): + msg_fmt = _("Security group with rule %(rule_id)s not found.") + + +class SecurityGroupExists(Invalid): + ec2_code = 'InvalidGroup.Duplicate' + msg_fmt = _("Security group %(security_group_name)s already exists " + "for project %(project_id)s.") + + +class SecurityGroupExistsForInstance(Invalid): + msg_fmt = _("Security group %(security_group_id)s is already associated" + " with the instance %(instance_id)s") + + +class SecurityGroupNotExistsForInstance(Invalid): + msg_fmt = _("Security group %(security_group_id)s is not associated with" + " the instance %(instance_id)s") + + +class SecurityGroupDefaultRuleNotFound(Invalid): + msg_fmt = _("Security group default rule (%rule_id)s not found.") + + +class SecurityGroupCannotBeApplied(Invalid): + msg_fmt = _("Network requires port_security_enabled and subnet associated" + " in order to apply security groups.") + + +class SecurityGroupRuleExists(Invalid): + ec2_code = 'InvalidPermission.Duplicate' + msg_fmt = _("Rule already exists in group: %(rule)s") + + +class NoUniqueMatch(GceapiException): + msg_fmt = _("No Unique Match Found.") + code = 409 + + +class MigrationNotFound(NotFound): + msg_fmt = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + msg_fmt = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class ConsolePoolNotFound(NotFound): + msg_fmt = _("Console pool %(pool_id)s could not be found.") + + +class ConsolePoolExists(GceapiException): + msg_fmt = _("Console pool with host %(host)s, console_type " + "%(console_type)s and compute_host %(compute_host)s " + "already exists.") + + +class ConsolePoolNotFoundForHostType(NotFound): + msg_fmt = _("Console pool of type %(console_type)s " + "for compute host %(compute_host)s " + "on proxy host %(host)s not found.") + + +class ConsoleNotFound(NotFound): + msg_fmt = _("Console %(console_id)s could not be found.") + + +class ConsoleNotFoundForInstance(ConsoleNotFound): + msg_fmt = _("Console for instance %(instance_uuid)s could not be found.") + + +class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): + msg_fmt = _("Console for instance %(instance_uuid)s " + "in pool %(pool_id)s could not be found.") + + +class ConsoleTypeInvalid(Invalid): + msg_fmt = _("Invalid console type %(console_type)s") + + +class ConsoleTypeUnavailable(Invalid): + msg_fmt = _("Unavailable console type %(console_type)s.") + + +class InstanceTypeNotFound(NotFound): + msg_fmt = _("Instance type %(instance_type_id)s could not be found.") + + +class InstanceTypeNotFoundByName(InstanceTypeNotFound): + msg_fmt = _("Instance type with name %(instance_type_name)s " + "could not be found.") + + +class FlavorNotFound(NotFound): + msg_fmt = _("Flavor %(flavor_id)s could not be found.") + + +class FlavorAccessNotFound(NotFound): + msg_fmt = _("Flavor access not found for %(flavor_id)s / " + "%(project_id)s combination.") + + +class CellNotFound(NotFound): + msg_fmt = _("Cell %(cell_name)s doesn't exist.") + + +class CellExists(GceapiException): + msg_fmt = _("Cell with name %(name)s already exists.") + + +class 
CellRoutingInconsistency(GceapiException): + msg_fmt = _("Inconsistency in cell routing: %(reason)s") + + +class CellServiceAPIMethodNotFound(NotFound): + msg_fmt = _("Service API method not found: %(detail)s") + + +class CellTimeout(NotFound): + msg_fmt = _("Timeout waiting for response from cell") + + +class CellMaxHopCountReached(GceapiException): + msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s") + + +class NoCellsAvailable(GceapiException): + msg_fmt = _("No cells available matching scheduling criteria.") + + +class CellsUpdateUnsupported(GceapiException): + msg_fmt = _("Cannot update cells configuration file.") + + +class InstanceUnknownCell(NotFound): + msg_fmt = _("Cell is not known for instance %(instance_uuid)s") + + +class SchedulerHostFilterNotFound(NotFound): + msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class InstanceTypeExtraSpecsNotFound(NotFound): + msg_fmt = _("Instance Type %(instance_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class FileNotFound(NotFound): + msg_fmt = _("File %(file_path)s could not be found.") + + +class NoFilesFound(NotFound): + msg_fmt = _("Zero files could be found.") + + +class SwitchNotFoundForNetworkAdapter(NotFound): + msg_fmt = _("Virtual switch associated with the " + "network adapter %(adapter)s not found.") + + +class NetworkAdapterNotFound(NotFound): + msg_fmt = _("Network adapter %(adapter)s could not be found.") + + +class ClassNotFound(NotFound): + msg_fmt = _("Class %(class_name)s could not be found: %(exception)s") + + +class NotAllowed(GceapiException): + msg_fmt = _("Action not allowed.") + + +class ImageRotationNotAllowed(GceapiException): + msg_fmt = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(GceapiException): + msg_fmt = _("Rotation param is required for backup image_type") + + +class KeyPairExists(GceapiException): + ec2_code = 'InvalidKeyPair.Duplicate' + msg_fmt = _("Key pair '%(key_name)s' already exists.") + + +class InstanceExists(GceapiException): + msg_fmt = _("Instance %(name)s already exists.") + + +class InstanceTypeExists(GceapiException): + msg_fmt = _("Instance Type with name %(name)s already exists.") + + +class InstanceTypeIdExists(GceapiException): + msg_fmt = _("Instance Type with ID %(flavor_id)s already exists.") + + +class FlavorAccessExists(GceapiException): + msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s " + "and project %(project_id)s combination.") + + +class InvalidSharedStorage(GceapiException): + msg_fmt = _("%(path)s is not on shared storage: %(reason)s") + + +class InvalidLocalStorage(GceapiException): + msg_fmt = _("%(path)s is not on local storage: %(reason)s") + + +class MigrationError(GceapiException): + msg_fmt = _("Migration error") + ": %(reason)s" + + +class MigrationPreCheckError(MigrationError): + msg_fmt = _("Migration pre-check error") + ": %(reason)s" + + +class MalformedRequestBody(GceapiException): + msg_fmt = _("Malformed message body: %(reason)s") + + +# NOTE(johannes): NotFound should only be used when a 404 error is +# appropriate to be returned +class ConfigNotFound(GceapiException): + msg_fmt = _("Could not find config at %(path)s") + + +class PasteAppNotFound(GceapiException): + msg_fmt = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameFlavor(GceapiException): + msg_fmt = _("When resizing, instances must change flavor!") + + +class ResizeError(GceapiException): + msg_fmt = _("Resize error: %(reason)s") + 
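
As a quick illustration of how these exception classes behave at runtime (InstanceNotFound is defined further down in this module; the msg_fmt interpolation happens in GceapiException.__init__):

from gceapi import exception

try:
    raise exception.InstanceNotFound(instance_id='i-0001')
except exception.GceapiException as exc:
    # format_message() returns the interpolated msg_fmt:
    # "Instance i-0001 could not be found."
    print(exc.format_message())
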
+ +class CannotResizeDisk(GceapiException): + msg_fmt = _("Server disk was unable to be resized because: %(reason)s") + + +class InstanceTypeMemoryTooSmall(GceapiException): + msg_fmt = _("Instance type's memory is too small for requested image.") + + +class InstanceTypeDiskTooSmall(GceapiException): + msg_fmt = _("Instance type's disk is too small for requested image.") + + +class InsufficientFreeMemory(GceapiException): + msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.") + + +class NoValidHost(GceapiException): + msg_fmt = _("No valid host was found. %(reason)s") + + +class QuotaError(GceapiException): + ec2_code = 'ResourceLimitExceeded' + msg_fmt = _("Quota exceeded") + ": code=%(code)s" + code = 413 + headers = {'Retry-After': 0} + safe = True + + +class TooManyInstances(QuotaError): + msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s," + " but already used %(used)d of %(allowed)d %(resource)s") + + +class FloatingIpLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of floating ips exceeded") + + +class FixedIpLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of fixed ips exceeded") + + +class MetadataLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d") + + +class OnsetFileLimitExceeded(QuotaError): + msg_fmt = _("Personality file limit exceeded") + + +class OnsetFilePathLimitExceeded(QuotaError): + msg_fmt = _("Personality file path too long") + + +class OnsetFileContentLimitExceeded(QuotaError): + msg_fmt = _("Personality file content too long") + + +class KeypairLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of key pairs exceeded") + + +class SecurityGroupLimitExceeded(QuotaError): + ec2_code = 'SecurityGroupLimitExceeded' + msg_fmt = _("Maximum number of security groups or rules exceeded") + + +class PortLimitExceeded(QuotaError): + msg_fmt = _("Maximum number of ports exceeded") + + +class AggregateError(GceapiException): + msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' " + "caused an error: %(reason)s.") + + +class AggregateNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s could not be found.") + + +class AggregateNameExists(GceapiException): + msg_fmt = _("Aggregate %(aggregate_name)s already exists.") + + +class AggregateHostNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.") + + +class AggregateMetadataNotFound(NotFound): + msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with " + "key %(metadata_key)s.") + + +class AggregateHostExists(GceapiException): + msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.") + + +class InstanceTypeCreateFailed(GceapiException): + msg_fmt = _("Unable to create instance type") + + +class InstancePasswordSetFailed(GceapiException): + msg_fmt = _("Failed to set admin password on %(instance)s " + "because %(reason)s") + safe = True + + +class DuplicateVlan(GceapiException): + msg_fmt = _("Detected existing vlan with id %(vlan)d") + + +class CidrConflict(GceapiException): + msg_fmt = _("There was a conflict when trying to complete your request.") + code = 409 + + +class InstanceNotFound(NotFound): + ec2_code = 'InvalidInstanceID.NotFound' + msg_fmt = _("Instance %(instance_id)s could not be found.") + + +class InstanceInfoCacheNotFound(NotFound): + msg_fmt = _("Info cache for instance %(instance_uuid)s could not be " + "found.") + + +class NodeNotFound(NotFound): + msg_fmt = _("Node %(node_id)s could not be found.") + + +class NodeNotFoundByUUID(NotFound): + msg_fmt 
= _("Node with UUID %(node_uuid)s could not be found.") + + +class MarkerNotFound(NotFound): + msg_fmt = _("Marker %(marker)s could not be found.") + + +class InvalidInstanceIDMalformed(Invalid): + ec2_code = 'InvalidInstanceID.Malformed' + msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").") + + +class CouldNotFetchImage(GceapiException): + msg_fmt = _("Could not fetch image %(image_id)s") + + +class CouldNotUploadImage(GceapiException): + msg_fmt = _("Could not upload image %(image_id)s") + + +class TaskAlreadyRunning(GceapiException): + msg_fmt = _("Task %(task_name)s is already running on host %(host)s") + + +class TaskNotRunning(GceapiException): + msg_fmt = _("Task %(task_name)s is not running on host %(host)s") + + +class InstanceIsLocked(InstanceInvalidState): + msg_fmt = _("Instance %(instance_uuid)s is locked") + + +class ConfigDriveInvalidValue(Invalid): + msg_fmt = _("Invalid value for Config Drive option: %(option)s") + + +class ConfigDriveMountFailed(GceapiException): + msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. " + "Error: %(error)s") + + +class ConfigDriveUnknownFormat(GceapiException): + msg_fmt = _("Unknown config drive format %(format)s. Select one of " + "iso9660 or vfat.") + + +class InterfaceAttachFailed(Invalid): + msg_fmt = _("Failed to attach network adapter device to %(instance)s") + + +class InterfaceDetachFailed(Invalid): + msg_fmt = _("Failed to detach network adapter device from %(instance)s") + + +class InstanceUserDataTooLarge(GceapiException): + msg_fmt = _("User data too large. User data must be no larger than " + "%(maxsize)s bytes once base64 encoded. Your data is " + "%(length)d bytes") + + +class InstanceUserDataMalformed(GceapiException): + msg_fmt = _("User data needs to be valid base 64.") + + +class UnexpectedTaskStateError(GceapiException): + msg_fmt = _("unexpected task state: expecting %(expected)s but " + "the actual state is %(actual)s") + + +class InstanceActionNotFound(GceapiException): + msg_fmt = _("Action for request_id %(request_id)s on instance" + " %(instance_uuid)s not found") + + +class InstanceActionEventNotFound(GceapiException): + msg_fmt = _("Event %(event)s not found for action id %(action_id)s") + + +class UnexpectedVMStateError(GceapiException): + msg_fmt = _("unexpected VM state: expecting %(expected)s but " + "the actual state is %(actual)s") + + +class CryptoCAFileNotFound(FileNotFound): + msg_fmt = _("The CA file for %(project)s could not be found") + + +class CryptoCRLFileNotFound(FileNotFound): + msg_fmt = _("The CRL file for %(project)s could not be found") + + +class InstanceRecreateNotSupported(Invalid): + msg_fmt = _('Instance recreate is not implemented by this virt driver.') + + +class ServiceGroupUnavailable(GceapiException): + msg_fmt = _("The service from servicegroup driver %(driver)s is " + "temporarily unavailable.") + + +class DBNotAllowed(GceapiException): + msg_fmt = _('%(binary)s attempted direct database access which is ' + 'not allowed by policy') + + +class UnsupportedVirtType(Invalid): + msg_fmt = _("Virtualization type '%(virt)s' is not supported by " + "this compute driver") + + +class UnsupportedHardware(Invalid): + msg_fmt = _("Requested hardware '%(model)s' is not supported by " + "the '%(virt)s' virt driver") + + +class Base64Exception(GceapiException): + msg_fmt = _("Invalid Base 64 data for file %(path)s") + + +class BuildAbortException(GceapiException): + msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s") + + +class 
RescheduledException(GceapiException): + msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: " + "%(reason)s") + + +class ShadowTableExists(GceapiException): + msg_fmt = _("Shadow table with name %(name)s already exists.") + + +class InstanceFaultRollback(GceapiException): + def __init__(self, inner_exception=None): + message = _("Instance rollback performed due to: %s") + self.inner_exception = inner_exception + super(InstanceFaultRollback, self).__init__(message % inner_exception) + + +class UnsupportedObjectError(GceapiException): + msg_fmt = _('Unsupported object type %(objtype)s') + + +class OrphanedObjectError(GceapiException): + msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object') + + +class IncompatibleObjectVersion(GceapiException): + msg_fmt = _('Version %(objver)s of %(objname)s is not supported') + + +class ObjectActionError(GceapiException): + msg_fmt = _('Object action %(action)s failed because: %(reason)s') + + +class CoreAPIMissing(GceapiException): + msg_fmt = _("Core API extensions are missing: %(missing_apis)s") + + +class AgentError(GceapiException): + msg_fmt = _('Error during following call to agent: %(method)s') + + +class AgentTimeout(AgentError): + msg_fmt = _('Unable to contact guest agent. ' + 'The following call timed out: %(method)s') + + +class AgentNotImplemented(AgentError): + msg_fmt = _('Agent does not support the call: %(method)s') + + +class InstanceGroupNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s could not be found.") + + +class InstanceGroupIdExists(GceapiException): + msg_fmt = _("Instance group %(group_uuid)s already exists.") + + +class InstanceGroupMetadataNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no metadata with " + "key %(metadata_key)s.") + + +class InstanceGroupMemberNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no member with " + "id %(instance_id)s.") + + +class InstanceGroupPolicyNotFound(NotFound): + msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.") + + +class PluginRetriesExceeded(GceapiException): + msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.") + + +class ImageDownloadModuleError(GceapiException): + msg_fmt = _("There was an error with the download module %(module)s. " + "%(reason)s") + + +class ImageDownloadModuleMetaDataError(ImageDownloadModuleError): + msg_fmt = _("The metadata for this location will not work with this " + "module %(module)s. 
%(reason)s.") + + +class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError): + msg_fmt = _("The method %(method_name)s is not implemented.") + + +class ImageDownloadModuleConfigurationError(ImageDownloadModuleError): + msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.") + + +class PciDeviceWrongAddressFormat(GceapiException): + msg_fmt = _("The PCI address %(address)s has an incorrect format.") + + +class PciDeviceNotFoundById(NotFound): + msg_fmt = _("PCI device %(id)s not found") + + +class PciDeviceNotFound(GceapiException): + msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.") + + +class PciDeviceInvalidStatus(GceapiException): + msg_fmt = _( + "PCI Device %(compute_node_id)s:%(address)s is %(status)s " + "instead of %(hopestatus)s") + + +class PciDeviceInvalidOwner(GceapiException): + msg_fmt = _( + "PCI Device %(compute_node_id)s:%(address)s is owned by %(owner)s " + "instead of %(hopeowner)s") + + +class PciDeviceRequestFailed(GceapiException): + msg_fmt = _( + "PCI Device request (%requests)s failed") + + +class PciDevicePoolEmpty(GceapiException): + msg_fmt = _( + "Attempt to consume PCI Device %(compute_node_id)s:%(address)s " + "from empty pool") + + +class PciInvalidAlias(GceapiException): + msg_fmt = _("Invalid PCI alias definition: %(reason)s") + + +class PciRequestAliasNotDefined(GceapiException): + msg_fmt = _("PCI alias %(alias)s is not defined") + + +class MissingParameter(GceapiException): + ec2_code = 'MissingParameter' + msg_fmt = _("Not enough parameters: %(reason)s") + code = 400 + + +class PciConfigInvalidWhitelist(Invalid): + msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s") + + +class PciTrackerInvalidNodeId(GceapiException): + msg_fmt = _("Cannot change %(node_id)s to %(new_node_id)s") + + +# Cannot be templated, msg needs to be constructed when raised. +class InternalError(GceapiException): + ec2_code = 'InternalError' + msg_fmt = "%(err)s" + + +class PciDevicePrepareFailed(GceapiException): + msg_fmt = _("Failed to prepare PCI device %(id)s for instance " + "%(instance_uuid)s: %(reason)s") + + +class PciDeviceDetachFailed(GceapiException): + msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s") + + +class PciDeviceUnsupportedHypervisor(GceapiException): + msg_fmt = _("%(type)s hypervisor does not support PCI devices") + + +class KeyManagerError(GceapiException): + msg_fmt = _("key manager error: %(reason)s") diff --git a/gceapi/openstack/__init__.py b/gceapi/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gceapi/openstack/common/__init__.py b/gceapi/openstack/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gceapi/openstack/common/db/__init__.py b/gceapi/openstack/common/db/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gceapi/openstack/common/db/api.py b/gceapi/openstack/common/db/api.py new file mode 100644 index 0000000..5a6f9f1 --- /dev/null +++ b/gceapi/openstack/common/db/api.py @@ -0,0 +1,57 @@ +# Copyright (c) 2013 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + License for the specific language governing permissions and limitations + under the License. + +"""Multiple DB API backend support. + +Supported configuration options: + +The following parameter is in the 'database' group: +`backend`: DB backend name or full module path to DB backend module. + +A DB backend module should implement a method named 'get_backend' which +takes no arguments. The method can return any object that implements DB +API methods. +""" + +from oslo.config import cfg + +from gceapi.openstack.common import importutils + + +db_opts = [ + cfg.StrOpt('backend', + default='sqlalchemy', + deprecated_name='db_backend', + deprecated_group='DEFAULT', + help='The backend to use for db'), +] + +CONF = cfg.CONF +CONF.register_opts(db_opts, 'database') + + +class DBAPI(object): + def __init__(self, backend_mapping=None): + if backend_mapping is None: + backend_mapping = {} + backend_name = CONF.database.backend + # Import the untranslated name if we don't have a + # mapping. + backend_path = backend_mapping.get(backend_name, backend_name) + backend_mod = importutils.import_module(backend_path) + self.__backend = backend_mod.get_backend() + + def __getattr__(self, key): + return getattr(self.__backend, key) diff --git a/gceapi/openstack/common/db/exception.py b/gceapi/openstack/common/db/exception.py new file mode 100644 index 0000000..e4bafe0 --- /dev/null +++ b/gceapi/openstack/common/db/exception.py @@ -0,0 +1,54 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
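+# Illustrative sketch for the DBAPI loader in db/api.py above. The backend
+# module path mirrors this patch's layout but is hypothetical here;
+# get_backend() is the only contract DBAPI relies on:
+#
+#     # gceapi/db/sqlalchemy/api.py
+#     import sys
+#
+#     def get_backend():
+#         """Return any object that implements the DB API methods."""
+#         return sys.modules[__name__]
+#
+#     # caller side:
+#     IMPL = DBAPI(backend_mapping={'sqlalchemy': 'gceapi.db.sqlalchemy.api'})
+#     IMPL.some_db_call()  # hypothetical method, proxied via __getattr__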
+ +"""DB related custom exceptions.""" + +from gceapi.openstack.common.gettextutils import _ + + +class DBError(Exception): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +class DBDuplicateEntry(DBError): + """Wraps an implementation specific exception.""" + def __init__(self, columns=[], inner_exception=None): + self.columns = columns + super(DBDuplicateEntry, self).__init__(inner_exception) + + +class DBDeadlock(DBError): + def __init__(self, inner_exception=None): + super(DBDeadlock, self).__init__(inner_exception) + + +class DBInvalidUnicodeParameter(Exception): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +class DbMigrationError(DBError): + """Wraps migration specific exception.""" + def __init__(self, message=None): + super(DbMigrationError, self).__init__(str(message)) + + +class DBConnectionError(DBError): + """Wraps connection specific exception.""" + pass diff --git a/gceapi/openstack/common/db/sqlalchemy/__init__.py b/gceapi/openstack/common/db/sqlalchemy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gceapi/openstack/common/db/sqlalchemy/migration.py b/gceapi/openstack/common/db/sqlalchemy/migration.py new file mode 100644 index 0000000..dbac902 --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/migration.py @@ -0,0 +1,265 @@ +# coding: utf-8 +# +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Base on code in migrate/changeset/databases/sqlite.py which is under +# the following license: +# +# The MIT License +# +# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +import os +import re + +from migrate.changeset import ansisql +from migrate.changeset.databases import sqlite +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository +import sqlalchemy +from sqlalchemy.schema import UniqueConstraint + +from gceapi.openstack.common.db import exception +from gceapi.openstack.common.db.sqlalchemy import session as db_session +from gceapi.openstack.common.gettextutils import _ + + +get_engine = db_session.get_engine + + +def _get_unique_constraints(self, table): + """Retrieve information about existing unique constraints of the table + + This feature is needed for _recreate_table() to work properly. + Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x. + + """ + + data = table.metadata.bind.execute( + """SELECT sql + FROM sqlite_master + WHERE + type='table' AND + name=:table_name""", + table_name=table.name + ).fetchone()[0] + + UNIQUE_PATTERN = r"CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)" + return [ + UniqueConstraint( + *[getattr(table.columns, c.strip(' "')) for c in cols.split(",")], + name=name + ) + for name, cols in re.findall(UNIQUE_PATTERN, data) + ] + + +def _recreate_table(self, table, column=None, delta=None, omit_uniques=None): + """Recreate the table properly + + Unlike the corresponding original method of sqlalchemy-migrate this one + doesn't drop existing unique constraints when creating a new one. + + """ + + table_name = self.preparer.format_table(table) + + # we remove all indexes so as not to have + # problems during copy and re-create + for index in table.indexes: + index.drop() + + # reflect existing unique constraints + for uc in self._get_unique_constraints(table): + table.append_constraint(uc) + # omit given unique constraints when creating a new table if required + table.constraints = set([ + cons for cons in table.constraints + if omit_uniques is None or cons.name not in omit_uniques + ]) + + self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) + self.execute() + + insertion_string = self._modify_table(table, column, delta) + + table.create(bind=self.connection) + self.append(insertion_string % {'table_name': table_name}) + self.execute() + self.append('DROP TABLE migration_tmp') + self.execute() + + +def _visit_migrate_unique_constraint(self, *p, **k): + """Drop the given unique constraint + + The corresponding original method of sqlalchemy-migrate just + raises a NotImplementedError + + """ + + self.recreate_table(p[0].table, omit_uniques=[p[0].name]) + + +def patch_migrate(): + """A workaround for SQLite's inability to alter things + + SQLite's abilities to alter tables are very limited (please read + http://www.sqlite.org/lang_altertable.html for more details). + E.g. one can't drop a column or a constraint in SQLite. The + workaround for this is to recreate the original table omitting + the corresponding constraint (or column). + + The sqlalchemy-migrate library has a recreate_table() method that + implements this workaround, but it does it wrong: + + - information about unique constraints of a table + is not retrieved. So if you have a table with one + unique constraint and a migration adding another one + you will end up with a table that has only the + latter unique constraint, and the former will be lost + + - dropping of unique constraints is not supported at all + + The proper way to fix this is to provide a pull-request to + sqlalchemy-migrate, but the project seems to be dead.
So we + can go on with monkey-patching of the lib at least for now. + + """ + + # this patch is needed to ensure that recreate_table() doesn't drop + # existing unique constraints of the table when creating a new one + helper_cls = sqlite.SQLiteHelper + helper_cls.recreate_table = _recreate_table + helper_cls._get_unique_constraints = _get_unique_constraints + + # this patch is needed to be able to drop existing unique constraints + constraint_cls = sqlite.SQLiteConstraintDropper + constraint_cls.visit_migrate_unique_constraint = \ + _visit_migrate_unique_constraint + constraint_cls.__bases__ = (ansisql.ANSIColumnDropper, + sqlite.SQLiteConstraintGenerator) + + +def db_sync(abs_path, version=None, init_version=0): + """Upgrade or downgrade a database. + + Function runs the upgrade() or downgrade() functions in change scripts. + + :param abs_path: Absolute path to migrate repository. + :param version: Database will upgrade/downgrade until this version. + If None - database will update to the latest + available version. + :param init_version: Initial database version + """ + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.DbMigrationError( + message=_("version should be an integer")) + + current_version = db_version(abs_path, init_version) + repository = _find_migrate_repo(abs_path) + _db_schema_sanity_check() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def _db_schema_sanity_check(): + engine = get_engine() + if engine.name == 'mysql': + onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION ' + 'from information_schema.TABLES ' + 'where TABLE_SCHEMA=%s and ' + 'TABLE_COLLATION NOT LIKE "%%utf8%%"') + + table_names = [res[0] for res in engine.execute(onlyutf8_sql, + engine.url.database)] + if len(table_names) > 0: + raise ValueError(_('Tables "%s" have non utf8 collation, ' + 'please make sure all tables are CHARSET=utf8' + ) % ','.join(table_names)) + + +def db_version(abs_path, init_version): + """Show the current version of the repository. + + :param abs_path: Absolute path to migrate repository + :param init_version: Initial database version + """ + repository = _find_migrate_repo(abs_path) + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + tables = meta.tables + if len(tables) == 0 or 'alembic_version' in tables: + db_version_control(abs_path, init_version) + return versioning_api.db_version(get_engine(), repository) + else: + raise exception.DbMigrationError( + message=_( + "The database is not under version control, but has " + "tables. Please stamp the current version of the schema " + "manually.")) + + +def db_version_control(abs_path, version=None): + """Mark a database as under this repository's version control. + + Once a database is under version control, schema changes should + only be done via change scripts in this repository.
+ + :param abs_path: Absolute path to migrate repository + :param version: Initial database version + """ + repository = _find_migrate_repo(abs_path) + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(abs_path): + """Get the project's change script repository + + :param abs_path: Absolute path to migrate repository + """ + if not os.path.exists(abs_path): + raise exception.DbMigrationError("Path %s not found" % abs_path) + return Repository(abs_path) diff --git a/gceapi/openstack/common/db/sqlalchemy/models.py b/gceapi/openstack/common/db/sqlalchemy/models.py new file mode 100644 index 0000000..1b6e5c2 --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/models.py @@ -0,0 +1,117 @@ +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2012 Cloudscaling Group, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models. +""" + +import six + +from sqlalchemy import Column, Integer +from sqlalchemy import DateTime +from sqlalchemy.orm import object_mapper + +from gceapi.openstack.common.db.sqlalchemy import session as sa +from gceapi.openstack.common import timeutils + + +class ModelBase(object): + """Base class for models.""" + __table_initialized__ = False + + def save(self, session=None): + """Save this object.""" + if not session: + session = sa.get_session() + # NOTE(boris-42): This part of code should look like: + # session.add(self) + # session.flush() + # But there is a bug in sqlalchemy and eventlet that + # raises a NoneType exception if there is no running + # transaction and rollback is called. As long as + # sqlalchemy has this bug we have to create the transaction + # explicitly. + with session.begin(subtransactions=True): + session.add(self) + session.flush() + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + @property + def _extra_keys(self): + """Specifies custom fields + + Subclasses can override this property to return a list + of custom fields that should be included in their dict + representation. + + For reference check tests/db/sqlalchemy/test_models.py + """ + return [] + + def __iter__(self): + columns = dict(object_mapper(self).columns).keys() + # NOTE(russellb): Allow models to specify other keys that can be looked + # up, beyond the actual db columns. An example would be the 'name' + # property for an Instance.
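+        # For instance, a hypothetical subclass could make a computed
+        # 'name' attribute iterable alongside the real columns:
+        #
+        #     class Instance(ModelBase):
+        #         @property
+        #         def _extra_keys(self):
+        #             return ['name']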
+ columns.extend(self._extra_keys) + self._i = iter(columns) + return self + + def next(self): + n = six.advance_iterator(self._i) + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict.""" + for k, v in six.iteritems(values): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. + + Includes attributes from joins. + """ + local = dict(self) + joined = dict([(k, v) for k, v in six.iteritems(self.__dict__) + if not k[0] == '_']) + local.update(joined) + return six.iteritems(local) + + +class TimestampMixin(object): + created_at = Column(DateTime, default=lambda: timeutils.utcnow()) + updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow()) + + +class SoftDeleteMixin(object): + deleted_at = Column(DateTime) + deleted = Column(Integer, default=0) + + def soft_delete(self, session=None): + """Mark this object as deleted.""" + self.deleted = self.id + self.deleted_at = timeutils.utcnow() + self.save(session=session) diff --git a/gceapi/openstack/common/db/sqlalchemy/provision.py b/gceapi/openstack/common/db/sqlalchemy/provision.py new file mode 100644 index 0000000..42c3c94 --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/provision.py @@ -0,0 +1,187 @@ +# Copyright 2013 Mirantis.inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provision test environment for specific DB backends""" + +import argparse +import os +import random +import string + +from six import moves +import sqlalchemy + +from gceapi.openstack.common.db import exception as exc + + +SQL_CONNECTION = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION', 'sqlite://') + + +def _gen_credentials(*names): + """Generate credentials.""" + auth_dict = {} + for name in names: + val = ''.join(random.choice(string.ascii_lowercase) + for i in moves.range(10)) + auth_dict[name] = val + return auth_dict + + +def _get_engine(uri=SQL_CONNECTION): + """Engine creation. + + By default the uri is SQL_CONNECTION, which holds admin credentials. + Call the function without arguments to get an admin connection; an admin + connection is required to create the temporary user and database for each + particular test. Otherwise, use the existing connection uri to reconnect + to the temporary database.
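+
+    Illustrative call pattern (a sketch; temp_uri stands for a URI returned
+    by create_database() below)::
+
+        admin_engine = _get_engine()         # admin credentials
+        test_engine = _get_engine(temp_uri)  # per-test temporary database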
+ """ + return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool) + + +def _execute_sql(engine, sql, driver): + """Initialize connection, execute sql query and close it.""" + try: + with engine.connect() as conn: + if driver == 'postgresql': + conn.connection.set_isolation_level(0) + for s in sql: + conn.execute(s) + except sqlalchemy.exc.OperationalError: + msg = ('%s does not match database admin ' + 'credentials or database does not exist.') + raise exc.DBConnectionError(msg % SQL_CONNECTION) + + +def create_database(engine): + """Provide temporary user and database for each particular test.""" + driver = engine.name + + auth = _gen_credentials('database', 'user', 'passwd') + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "grant all on %(database)s.* to '%(user)s'@'localhost'" + " identified by '%(passwd)s';", + "create database %(database)s;", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + "create user %(user)s with password '%(passwd)s';", + "create database %(database)s owner %(user)s;", + ] + } + + if driver == 'sqlite': + return 'sqlite:////tmp/%s' % auth['database'] + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = map(lambda x: x % auth, sql_rows) + + _execute_sql(engine, sql_query, driver) + + params = auth.copy() + params['backend'] = driver + return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params + + +def drop_database(engine, current_uri): + """Drop temporary database and user after each particular test.""" + engine = _get_engine(current_uri) + admin_engine = _get_engine() + driver = engine.name + auth = {'database': engine.url.database, 'user': engine.url.username} + + if driver == 'sqlite': + try: + os.remove(auth['database']) + except OSError: + pass + return + + sqls = { + 'mysql': [ + "drop database if exists %(database)s;", + "drop user '%(user)s'@'localhost';", + ], + 'postgresql': [ + "drop database if exists %(database)s;", + "drop user if exists %(user)s;", + ] + } + + try: + sql_rows = sqls[driver] + except KeyError: + raise ValueError('Unsupported RDBMS %s' % driver) + sql_query = map(lambda x: x % auth, sql_rows) + + _execute_sql(admin_engine, sql_query, driver) + + +def main(): + """Controller to handle commands + + ::create: Create test user and database with random names. + ::drop: Drop user and database created by previous command. + """ + parser = argparse.ArgumentParser( + description='Controller to handle database creation and dropping' + ' commands.', + epilog='Under normal circumstances is not used directly.' 
+ ' Used in .testr.conf to automate test database creation' + ' and dropping processes.') + subparsers = parser.add_subparsers( + help='Subcommands to manipulate temporary test databases.') + + create = subparsers.add_parser( + 'create', + help='Create temporary test ' + 'databases and users.') + create.set_defaults(which='create') + create.add_argument( + 'instances_count', + type=int, + help='Number of databases to create.') + + drop = subparsers.add_parser( + 'drop', + help='Drop temporary test databases and users.') + drop.set_defaults(which='drop') + drop.add_argument( + 'instances', + nargs='+', + help='List of database URIs to be dropped.') + + args = parser.parse_args() + + engine = _get_engine() + which = args.which + + if which == "create": + for i in range(int(args.instances_count)): + print(create_database(engine)) + elif which == "drop": + for db in args.instances: + drop_database(engine, db) + + +if __name__ == "__main__": + main() diff --git a/gceapi/openstack/common/db/sqlalchemy/session.py b/gceapi/openstack/common/db/sqlalchemy/session.py new file mode 100644 index 0000000..7024e1b --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/session.py @@ -0,0 +1,867 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend. + +Initializing: + +* Call set_defaults with at least the following kwargs: + sql_connection, sqlite_db + + Example:: + + session.set_defaults( + sql_connection="sqlite:///var/lib/gceapi/sqlite.db", + sqlite_db="/var/lib/gceapi/sqlite.db") + +Recommended ways to use sessions within this framework: + +* Don't use them explicitly; this is like running with AUTOCOMMIT=1. + model_query() will implicitly use a session when called without one + supplied. This is the ideal situation because it will allow queries + to be automatically retried if the database connection is interrupted. + + Note: Automatic retry will be enabled in a future patch. + + It is generally fine to issue several queries in a row like this. Even though + they may be run in separate transactions and/or separate sessions, each one + will see the data from the prior calls. If needed, undo- or rollback-like + functionality should be handled at a logical level. For an example, look at + the code around quotas and reservation_rollback(). + + Examples:: + + def get_foo(context, foo): + return (model_query(context, models.Foo). + filter_by(foo=foo). + first()) + + def update_foo(context, id, newfoo): + (model_query(context, models.Foo). + filter_by(id=id). + update({'foo': newfoo})) + + def create_foo(context, values): + foo_ref = models.Foo() + foo_ref.update(values) + foo_ref.save() + return foo_ref + + +* Within the scope of a single method, keep all the reads and writes within + the context managed by a single session.
In this way, the session's __exit__ + handler will take care of calling flush() and commit() for you. + If using this approach, you should not explicitly call flush() or commit(). + Any error within the context of the session will cause the session to emit + a ROLLBACK. Database Errors like IntegrityError will be raised in + the session's __exit__ handler, and any try/except within the context managed + by session will not be triggered. And catching other non-database errors in + the session will not trigger the ROLLBACK, so exception handlers should + always be outside the session, unless the developer wants to do a partial + commit on purpose. If the connection is dropped before this is possible, + the database will implicitly roll back the transaction. + + Note: statements in the session scope will not be automatically retried. + + If you create models within the session, they need to be added, but you + do not need to call model.save() + + :: + + def create_many_foo(context, foos): + session = get_session() + with session.begin(): + for foo in foos: + foo_ref = models.Foo() + foo_ref.update(foo) + session.add(foo_ref) + + def update_bar(context, foo_id, newbar): + session = get_session() + with session.begin(): + foo_ref = (model_query(context, models.Foo, session). + filter_by(id=foo_id). + first()) + (model_query(context, models.Bar, session). + filter_by(id=foo_ref['bar_id']). + update({'bar': newbar})) + + Note: update_bar is a trivially simple example of using "with session.begin". + While create_many_foo is a good example of a case where a transaction is + needed, it is always best to use as few queries as possible. The two queries + in update_bar can be better expressed using a single query which avoids + the need for an explicit transaction. It can be expressed like so:: + + def update_bar(context, foo_id, newbar): + subq = (model_query(context, models.Foo.id). + filter_by(id=foo_id). + limit(1). + subquery()) + (model_query(context, models.Bar). + filter_by(id=subq.as_scalar()). + update({'bar': newbar})) + + For reference, this emits approximately the following SQL statement:: + + UPDATE bar SET bar = ${newbar} + WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1); + + Note: create_duplicate_foo is a trivially simple example of catching an + exception while using "with session.begin". Here we create two duplicate + instances with the same primary key; the exception must be caught outside + the context managed by a single session: + + def create_duplicate_foo(context): + foo1 = models.Foo() + foo2 = models.Foo() + foo1.id = foo2.id = 1 + session = get_session() + try: + with session.begin(): + session.add(foo1) + session.add(foo2) + except exception.DBDuplicateEntry as e: + handle_error(e) + +* Passing an active session between methods. Sessions should only be passed + to private methods. The private method must use a subtransaction; otherwise + SQLAlchemy will throw an error when you call session.begin() on an existing + transaction. Public methods should not accept a session parameter and should + not be involved in sessions within the caller's scope. + + Note that this incurs more overhead in SQLAlchemy than the approaches above + due to nesting transactions, and it is not possible to implicitly retry + failed database operations when using this approach. + + This also makes code somewhat more difficult to read and debug, because a + single database transaction spans more than one method. Error handling + becomes less clear in this situation.
When this is needed for code clarity, + it should be clearly documented. + + :: + + def myfunc(foo): + session = get_session() + with session.begin(): + # do some database things + bar = _private_func(foo, session) + return bar + + def _private_func(foo, session=None): + if not session: + session = get_session() + with session.begin(subtransactions=True): + # do some other database things + return bar + + +There are some things which it is best to avoid: + +* Don't keep a transaction open any longer than necessary. + + This means that your "with session.begin()" block should be as short + as possible, while still containing all the related calls for that + transaction. + +* Avoid "with_lockmode('UPDATE')" when possible. + + In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match + any rows, it will take a gap-lock. This is a form of write-lock on the + "gap" where no rows exist, and prevents any other writes to that space. + This can effectively prevent any INSERT into a table by locking the gap + at the end of the index. Similar problems will occur if the SELECT FOR UPDATE + has an overly broad WHERE clause, or doesn't properly use an index. + + One idea proposed at ODS Fall '12 was to use a normal SELECT to test the + number of rows matching a query, and if only one row is returned, + then issue the SELECT FOR UPDATE. + + The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE. + However, this cannot be done until the "deleted" columns are removed and + proper UNIQUE constraints are added to the tables. + + +Enabling soft deletes: + +* To use/enable soft-deletes, the SoftDeleteMixin must be added + to your model class. For example:: + + class NovaBase(models.SoftDeleteMixin, models.ModelBase): + pass + + +Efficient use of soft deletes: + +* There are two possible ways to mark a record as deleted:: + + model.soft_delete() and query.soft_delete(). + + The model.soft_delete() method works with a single, already-fetched entry. + query.soft_delete() makes only one db request for all entries that correspond + to the query. + +* In almost all cases you should use query.soft_delete(). Some examples:: + + def soft_delete_bar(): + count = model_query(BarModel).find(some_condition).soft_delete() + if count == 0: + raise Exception("0 entries were soft deleted") + + def complex_soft_delete_with_synchronization_bar(session=None): + if session is None: + session = get_session() + with session.begin(subtransactions=True): + count = (model_query(BarModel). + find(some_condition). + soft_delete(synchronize_session=True)) + # Here synchronize_session is required, because we + # don't know what is going on in outer session. + if count == 0: + raise Exception("0 entries were soft deleted") + +* There is only one situation where model.soft_delete() is appropriate: when + you fetch a single record, work with it, and mark it as deleted in the same + transaction. + + :: + + def soft_delete_bar_model(): + session = get_session() + with session.begin(): + bar_ref = model_query(BarModel).find(some_condition).first() + # Work with bar_ref + bar_ref.soft_delete(session=session) + + However, if you need to work with all entries that correspond to a query and + then soft delete them, you should use the query.soft_delete() method:: + + def soft_delete_multi_models(): + session = get_session() + with session.begin(): + query = (model_query(BarModel, session=session).
+ find(some_condition)) + model_refs = query.all() + # Work with model_refs + query.soft_delete(synchronize_session=False) + # synchronize_session=False should be set if there is no outer + # session and these entries are not used after this. + + When working with many rows, it is very important to use query.soft_delete, + which issues a single query. Using model.soft_delete(), as in the following + example, is very inefficient. + + :: + + for bar_ref in bar_refs: + bar_ref.soft_delete(session=session) + # This will produce count(bar_refs) db requests. +""" + +import functools +import os.path +import re +import time + +from oslo.config import cfg +import six +from sqlalchemy import exc as sqla_exc +from sqlalchemy.interfaces import PoolListener +import sqlalchemy.orm +from sqlalchemy.pool import NullPool, StaticPool +from sqlalchemy.sql.expression import literal_column + +from gceapi.openstack.common.db import exception +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging +from gceapi.openstack.common import timeutils + +sqlite_db_opts = [ + cfg.StrOpt('sqlite_db', + default='gceapi.sqlite', + help='The file name to use with SQLite'), + cfg.BoolOpt('sqlite_synchronous', + default=True, + help='If True, SQLite uses synchronous mode'), +] + +database_opts = [ + cfg.StrOpt('connection', + default='sqlite:///' + + os.path.abspath(os.path.join(os.path.dirname(__file__), + '../', '$sqlite_db')), + help='The SQLAlchemy connection string used to connect to the ' + 'database', + secret=True, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_connection', + group='DATABASE'), + cfg.DeprecatedOpt('connection', + group='sql'), ]), + cfg.StrOpt('slave_connection', + default='', + secret=True, + help='The SQLAlchemy connection string used to connect to the ' + 'slave database'), + cfg.IntOpt('idle_timeout', + default=3600, + deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_idle_timeout', + group='DATABASE'), + cfg.DeprecatedOpt('idle_timeout', + group='sql')], + help='Timeout before idle sql connections are reaped'), + cfg.IntOpt('min_pool_size', + default=1, + deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_min_pool_size', + group='DATABASE')], + help='Minimum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_pool_size', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_pool_size', + group='DATABASE')], + help='Maximum number of SQL connections to keep open in a ' + 'pool'), + cfg.IntOpt('max_retries', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries', + group='DEFAULT'), + cfg.DeprecatedOpt('sql_max_retries', + group='DATABASE')], + help='Maximum db connection retries during startup. 
' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('retry_interval', + default=10, + deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval', + group='DEFAULT'), + cfg.DeprecatedOpt('reconnect_interval', + group='DATABASE')], + help='Interval between retries of opening a sql connection'), + cfg.IntOpt('max_overflow', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow', + group='DEFAULT'), + cfg.DeprecatedOpt('sqlalchemy_max_overflow', + group='DATABASE')], + help='If set, use this value for max_overflow with sqlalchemy'), + cfg.IntOpt('connection_debug', + default=0, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug', + group='DEFAULT')], + help='Verbosity of SQL debugging information. 0=None, ' + '100=Everything'), + cfg.BoolOpt('connection_trace', + default=False, + deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace', + group='DEFAULT')], + help='Add python stack traces to SQL as comment strings'), + cfg.IntOpt('pool_timeout', + default=None, + deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout', + group='DATABASE')], + help='If set, use this value for pool_timeout with sqlalchemy'), +] + +CONF = cfg.CONF +CONF.register_opts(sqlite_db_opts) +CONF.register_opts(database_opts, 'database') + +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None +_SLAVE_ENGINE = None +_SLAVE_MAKER = None + + +def set_defaults(sql_connection, sqlite_db, max_pool_size=None, + max_overflow=None, pool_timeout=None): + """Set defaults for configuration variables.""" + cfg.set_defaults(database_opts, + connection=sql_connection) + cfg.set_defaults(sqlite_db_opts, + sqlite_db=sqlite_db) + # Update the QueuePool defaults + if max_pool_size is not None: + cfg.set_defaults(database_opts, + max_pool_size=max_pool_size) + if max_overflow is not None: + cfg.set_defaults(database_opts, + max_overflow=max_overflow) + if pool_timeout is not None: + cfg.set_defaults(database_opts, + pool_timeout=pool_timeout) + + +def cleanup(): + global _ENGINE, _MAKER + global _SLAVE_ENGINE, _SLAVE_MAKER + + if _MAKER: + _MAKER.close_all() + _MAKER = None + if _ENGINE: + _ENGINE.dispose() + _ENGINE = None + if _SLAVE_MAKER: + _SLAVE_MAKER.close_all() + _SLAVE_MAKER = None + if _SLAVE_ENGINE: + _SLAVE_ENGINE.dispose() + _SLAVE_ENGINE = None + + +class SqliteForeignKeysListener(PoolListener): + """Ensures that the foreign key constraints are enforced in SQLite. 
+ + The foreign key constraints are disabled by default in SQLite, + so the foreign key constraints will be enabled here for every + database connection. + """ + def connect(self, dbapi_con, con_record): + dbapi_con.execute('pragma foreign_keys=ON') + + +def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False, + slave_session=False, mysql_traditional_mode=False): + """Return a SQLAlchemy session.""" + global _MAKER + global _SLAVE_MAKER + maker = _MAKER + + if slave_session: + maker = _SLAVE_MAKER + + if maker is None: + engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session, + mysql_traditional_mode=mysql_traditional_mode) + maker = get_maker(engine, autocommit, expire_on_commit) + + if slave_session: + _SLAVE_MAKER = maker + else: + _MAKER = maker + + session = maker() + return session + + +# note(boris-42): In current versions of DB backends unique constraint +# violation messages follow the structure: +# +# sqlite: +# 1 column - (IntegrityError) column c1 is not unique +# N columns - (IntegrityError) column c1, c2, ..., N are not unique +# +# sqlite since 3.7.16: +# 1 column - (IntegrityError) UNIQUE constraint failed: k1 +# +# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2 +# +# postgres: +# 1 column - (IntegrityError) duplicate key value violates unique +# constraint "users_c1_key" +# N columns - (IntegrityError) duplicate key value violates unique +# constraint "name_of_our_constraint" +# +# mysql: +# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key +# 'c1'") +# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined +# with -' for key 'name_of_our_constraint'") +_DUP_KEY_RE_DB = { + "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"), + re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")), + "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),), + "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),) +} + + +def _raise_if_duplicate_entry_error(integrity_error, engine_name): + """Raise exception if two entries are duplicated. + + DBDuplicateEntry is raised when the integrity error wraps a unique + constraint violation. + """ + + def get_columns_from_uniq_cons_or_name(columns): + # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2", + # where `t` is the table name and `c1`, `c2` + # are the columns in the UniqueConstraint. + uniqbase = "uniq_" + if not columns.startswith(uniqbase): + if engine_name == "postgresql": + return [columns[columns.index("_") + 1:columns.rindex("_")]] + return [columns] + return columns[len(uniqbase):].split("0")[1:] + + if engine_name not in ["mysql", "sqlite", "postgresql"]: + return + + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions.
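+    # Worked example of the naming convention above (hypothetical
+    # constraint name):
+    #
+    #     get_columns_from_uniq_cons_or_name("uniq_instances0uuid0name")
+    #     # -> ["uuid", "name"] (columns of the "instances" table)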
+ for pattern in _DUP_KEY_RE_DB[engine_name]: + match = pattern.match(integrity_error.message) + if match: + break + else: + return + + columns = match.group(1) + + if engine_name == "sqlite": + columns = columns.strip().split(", ") + else: + columns = get_columns_from_uniq_cons_or_name(columns) + raise exception.DBDuplicateEntry(columns, integrity_error) + + +# NOTE(comstud): In current versions of DB backends, Deadlock violation +# messages follow the structure: +# +# mysql: +# (OperationalError) (1213, 'Deadlock found when trying to get lock; try ' +# 'restarting transaction') +_DEADLOCK_RE_DB = { + "mysql": re.compile(r"^.*\(1213, 'Deadlock.*") +} + + +def _raise_if_deadlock_error(operational_error, engine_name): + """Raise exception on deadlock condition. + + Raise DBDeadlock exception if OperationalError contains a Deadlock + condition. + """ + re = _DEADLOCK_RE_DB.get(engine_name) + if re is None: + return + # FIXME(johannes): The usage of the .message attribute has been + # deprecated since Python 2.6. However, the exceptions raised by + # SQLAlchemy can differ when using unicode() and accessing .message. + # An audit across all three supported engines will be necessary to + # ensure there are no regressions. + m = re.match(operational_error.message) + if not m: + return + raise exception.DBDeadlock(operational_error) + + +def _wrap_db_error(f): + @functools.wraps(f) + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise exception.DBInvalidUnicodeParameter() + # note(boris-42): We should catch unique constraint violation and + # wrap it by our own DBDuplicateEntry exception. Unique constraint + # violation is wrapped by IntegrityError. + except sqla_exc.OperationalError as e: + _raise_if_deadlock_error(e, get_engine().name) + # NOTE(comstud): A lot of code is checking for OperationalError + # so let's not wrap it for now. + raise + except sqla_exc.IntegrityError as e: + # note(boris-42): SqlAlchemy doesn't unify errors from different + # DBs so we must do this. Also in some tables (for example + # instance_types) there are more than one unique constraint. This + # means we should get names of columns, which values violate + # unique constraint, from error message. + _raise_if_duplicate_entry_error(e, get_engine().name) + raise exception.DBError(e) + except Exception as e: + LOG.exception(_('DB exception wrapped.')) + raise exception.DBError(e) + return _wrap + + +def get_engine(sqlite_fk=False, slave_engine=False, + mysql_traditional_mode=False): + """Return a SQLAlchemy engine.""" + global _ENGINE + global _SLAVE_ENGINE + engine = _ENGINE + db_uri = CONF.database.connection + + if slave_engine: + engine = _SLAVE_ENGINE + db_uri = CONF.database.slave_connection + + if engine is None: + engine = create_engine(db_uri, sqlite_fk=sqlite_fk, + mysql_traditional_mode=mysql_traditional_mode) + if slave_engine: + _SLAVE_ENGINE = engine + else: + _ENGINE = engine + + return engine + + +def _synchronous_switch_listener(dbapi_conn, connection_rec): + """Switch sqlite connections to non-synchronous mode.""" + dbapi_conn.execute("PRAGMA synchronous = OFF") + + +def _add_regexp_listener(dbapi_con, con_record): + """Add REGEXP function to sqlite connections.""" + + def regexp(expr, item): + reg = re.compile(expr) + return reg.search(six.text_type(item)) is not None + dbapi_con.create_function('regexp', 2, regexp) + + +def _thread_yield(dbapi_con, con_record): + """Ensure other greenthreads get a chance to be executed. 
+ + If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will + execute instead of time.sleep(0). + Force a context switch. With common database backends (e.g. MySQLdb and + sqlite), there is no implicit yield caused by network I/O since they are + implemented by C libraries that eventlet cannot monkey patch. + """ + time.sleep(0) + + +def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy): + """Ensures that MySQL and DB2 connections are alive. + + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + cursor = dbapi_conn.cursor() + try: + ping_sql = 'select 1' + if engine.name == 'ibm_db_sa': + # DB2 requires a table expression + ping_sql = 'select 1 from (values (1)) AS t1' + cursor.execute(ping_sql) + except Exception as ex: + if engine.dialect.is_disconnect(ex, dbapi_conn, cursor): + msg = _('Database server has gone away: %s') % ex + LOG.warning(msg) + raise sqla_exc.DisconnectionError(msg) + else: + raise + + +def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy): + """Set engine mode to 'traditional'. + + Required to prevent silent truncation at insert or update operations + under MySQL. By default MySQL truncates an inserted string if it is + longer than the declared field, issuing only a warning. That is fraught + with data corruption. + """ + dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;") + + +def _is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + # For the db2, the error code is -30081 since the db2 is still not ready + conn_err_codes = ('2002', '2003', '2006', '-30081') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def create_engine(sql_connection, sqlite_fk=False, + mysql_traditional_mode=False): + """Return a new SQLAlchemy engine.""" + # NOTE(geekinutah): At this point we could be connecting to the normal + # db handle or the slave db handle. Things like + # _wrap_db_error aren't going to work well if their + # backends don't match. Let's check.
+ _assert_matching_drivers() + connection_dict = sqlalchemy.engine.url.make_url(sql_connection) + + engine_args = { + "pool_recycle": CONF.database.idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if CONF.database.connection_debug >= 100: + engine_args['echo'] = 'debug' + elif CONF.database.connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + if sqlite_fk: + engine_args["listeners"] = [SqliteForeignKeysListener()] + engine_args["poolclass"] = NullPool + + if CONF.database.connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + else: + if CONF.database.max_pool_size is not None: + engine_args['pool_size'] = CONF.database.max_pool_size + if CONF.database.max_overflow is not None: + engine_args['max_overflow'] = CONF.database.max_overflow + if CONF.database.pool_timeout is not None: + engine_args['pool_timeout'] = CONF.database.pool_timeout + + engine = sqlalchemy.create_engine(sql_connection, **engine_args) + + sqlalchemy.event.listen(engine, 'checkin', _thread_yield) + + if engine.name in ['mysql', 'ibm_db_sa']: + callback = functools.partial(_ping_listener, engine) + sqlalchemy.event.listen(engine, 'checkout', callback) + if mysql_traditional_mode: + sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional) + else: + LOG.warning(_("This application has not enabled MySQL traditional" + " mode, which means silent data corruption may" + " occur. Please encourage the application" + " developers to enable this mode.")) + elif 'sqlite' in connection_dict.drivername: + if not CONF.sqlite_synchronous: + sqlalchemy.event.listen(engine, 'connect', + _synchronous_switch_listener) + sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener) + + if (CONF.database.connection_trace and + engine.dialect.dbapi.__name__ == 'MySQLdb'): + _patch_mysqldb_with_stacktrace_comments() + + try: + engine.connect() + except sqla_exc.OperationalError as e: + if not _is_db_connection_error(e.args[0]): + raise + + remaining = CONF.database.max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. 
%s attempts left.') + LOG.warning(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(CONF.database.retry_interval) + try: + engine.connect() + break + except sqla_exc.OperationalError as e: + if (remaining != 'infinite' and remaining == 0) or \ + not _is_db_connection_error(e.args[0]): + raise + return engine + + +class Query(sqlalchemy.orm.query.Query): + """Subclass of sqlalchemy.query with soft_delete() method.""" + def soft_delete(self, synchronize_session='evaluate'): + return self.update({'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow()}, + synchronize_session=synchronize_session) + + +class Session(sqlalchemy.orm.session.Session): + """Custom Session class to avoid SqlAlchemy Session monkey patching.""" + @_wrap_db_error + def query(self, *args, **kwargs): + return super(Session, self).query(*args, **kwargs) + + @_wrap_db_error + def flush(self, *args, **kwargs): + return super(Session, self).flush(*args, **kwargs) + + @_wrap_db_error + def execute(self, *args, **kwargs): + return super(Session, self).execute(*args, **kwargs) + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + class_=Session, + autocommit=autocommit, + expire_on_commit=expire_on_commit, + query_cls=Query) + + +def _patch_mysqldb_with_stacktrace_comments(): + """Adds current stack trace as a comment in queries. + + Patches MySQLdb.cursors.BaseCursor._do_query. + """ + import MySQLdb.cursors + import traceback + + old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query + + def _do_query(self, q): + stack = '' + for filename, line, method, function in traceback.extract_stack(): + # exclude various common things from trace + if filename.endswith('session.py') and method == '_do_query': + continue + if filename.endswith('api.py') and method == 'wrapper': + continue + if filename.endswith('utils.py') and method == '_inner': + continue + if filename.endswith('exception.py') and method == '_wrap': + continue + # db/api is just a wrapper around db/sqlalchemy/api + if filename.endswith('db/api.py'): + continue + # only trace inside gceapi + index = filename.rfind('gceapi') + if index == -1: + continue + stack += "File:%s:%s Method:%s() Line:%s | " \ + % (filename[index:], line, method, function) + + # strip trailing " | " from stack + if stack: + stack = stack[:-3] + qq = "%s /* %s */" % (q, stack) + else: + qq = q + old_mysql_do_query(self, qq) + + setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query) + + +def _assert_matching_drivers(): + """Make sure slave handle and normal handle have the same driver.""" + # NOTE(geekinutah): There's no use case for writing to one backend and + # reading from another. Who knows what the future holds? + if CONF.database.slave_connection == '': + return + + normal = sqlalchemy.engine.url.make_url(CONF.database.connection) + slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection) + assert normal.drivername == slave.drivername diff --git a/gceapi/openstack/common/db/sqlalchemy/test_migrations.py b/gceapi/openstack/common/db/sqlalchemy/test_migrations.py new file mode 100644 index 0000000..7162aa3 --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/test_migrations.py @@ -0,0 +1,269 @@ +# Copyright 2010-2011 OpenStack Foundation +# Copyright 2012-2013 IBM Corp. +# All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import os
+import subprocess
+
+import lockfile
+from six import moves
+import sqlalchemy
+import sqlalchemy.exc
+
+from gceapi.openstack.common.db.sqlalchemy import utils
+from gceapi.openstack.common.gettextutils import _
+from gceapi.openstack.common import log as logging
+from gceapi.openstack.common.py3kcompat import urlutils
+from gceapi.openstack.common import test
+
+LOG = logging.getLogger(__name__)
+
+
+def _have_mysql(user, passwd, database):
+    present = os.environ.get('TEST_MYSQL_PRESENT')
+    if present is None:
+        return utils.is_backend_avail(backend='mysql',
+                                      user=user,
+                                      passwd=passwd,
+                                      database=database)
+    return present.lower() in ('', 'true')
+
+
+def _have_postgresql(user, passwd, database):
+    present = os.environ.get('TEST_POSTGRESQL_PRESENT')
+    if present is None:
+        return utils.is_backend_avail(backend='postgres',
+                                      user=user,
+                                      passwd=passwd,
+                                      database=database)
+    return present.lower() in ('', 'true')
+
+
+def _set_db_lock(lock_path=None, lock_prefix=None):
+    def decorator(f):
+        @functools.wraps(f)
+        def wrapper(*args, **kwargs):
+            try:
+                path = lock_path or os.environ.get("GCEAPI_LOCK_PATH")
+                lock = lockfile.FileLock(os.path.join(path, lock_prefix))
+                with lock:
+                    LOG.debug(_('Got lock "%s"') % f.__name__)
+                    return f(*args, **kwargs)
+            finally:
+                LOG.debug(_('Lock released "%s"') % f.__name__)
+        return wrapper
+    return decorator
+
+
+class BaseMigrationTestCase(test.BaseTestCase):
+    """Base class for testing of migration utils."""
+
+    def __init__(self, *args, **kwargs):
+        super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
+
+        self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+                                                'test_migrations.conf')
+        # Test machines can set the TEST_MIGRATIONS_CONF variable
+        # to override the location of the config file for migration testing
+        self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
+                                               self.DEFAULT_CONFIG_FILE)
+        self.test_databases = {}
+        self.migration_api = None
+
+    def setUp(self):
+        super(BaseMigrationTestCase, self).setUp()
+
+        # Load test databases from the config file. Only do this
+        # once. No need to re-run this on each test...
+        LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+        if os.path.exists(self.CONFIG_FILE_PATH):
+            cp = moves.configparser.RawConfigParser()
+            try:
+                cp.read(self.CONFIG_FILE_PATH)
+                defaults = cp.defaults()
+                for key, value in defaults.items():
+                    self.test_databases[key] = value
+            except moves.configparser.ParsingError as e:
+                self.fail("Failed to read test_migrations.conf config "
+                          "file. Got error: %s" % e)
+        else:
+            self.fail("Failed to find test_migrations.conf config "
+                      "file.")
+
+        self.engines = {}
+        for key, value in self.test_databases.items():
+            self.engines[key] = sqlalchemy.create_engine(value)
+
+        # We start each test case with a completely blank slate.
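+        # (_reset_databases, defined below, drops and recreates every
+        # database configured above: sqlite files are unlinked, while
+        # mysql and postgresql databases are dropped and recreated
+        # through their command-line clients.)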
+ self._reset_databases() + + def tearDown(self): + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + super(BaseMigrationTestCase, self).tearDown() + + def execute_cmd(self, cmd=None): + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = process.communicate()[0] + LOG.debug(output) + self.assertEqual(0, process.returncode, + "Failed to run: %s\n%s" % (cmd, output)) + + def _reset_pg(self, conn_pieces): + (user, + password, + database, + host) = utils.get_db_connection_info(conn_pieces) + os.environ['PGPASSWORD'] = password + os.environ['PGUSER'] = user + # note(boris-42): We must create and drop database, we can't + # drop database which we have connected to, so for such + # operations there is a special database template1. + sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" + " '%(sql)s' -d template1") + + sql = ("drop database if exists %s;") % database + droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(droptable) + + sql = ("create database %s;") % database + createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql} + self.execute_cmd(createtable) + + os.unsetenv('PGPASSWORD') + os.unsetenv('PGUSER') + + @_set_db_lock(lock_prefix='migration_tests-') + def _reset_databases(self): + for key, engine in self.engines.items(): + conn_string = self.test_databases[key] + conn_pieces = urlutils.urlparse(conn_string) + engine.dispose() + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. + (user, password, database, host) = \ + utils.get_db_connection_info(conn_pieces) + sql = ("drop database if exists %(db)s; " + "create database %(db)s;") % {'db': database} + cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s " + "-e \"%(sql)s\"") % {'user': user, 'password': password, + 'host': host, 'sql': sql} + self.execute_cmd(cmd) + elif conn_string.startswith('postgresql'): + self._reset_pg(conn_pieces) + + +class WalkVersionsMixin(object): + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. 
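+        # For example, with INIT_VERSION = 0 and three migration scripts,
+        # the walk is: up 1, (down 0, up 1), up 2, (down 1, up 2), up 3,
+        # (down 2, up 3), then back down 2, 1, 0 if downgrade is enabled;
+        # the parenthesized steps run only when snake_walk is True.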
+ + # Place the database under version control + self.migration_api.version_control(engine, self.REPOSITORY, + self.INIT_VERSION) + self.assertEqual(self.INIT_VERSION, + self.migration_api.db_version(engine, + self.REPOSITORY)) + + LOG.debug('latest version is %s' % self.REPOSITORY.latest) + versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1) + + for version in versions: + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version, with_data=True) + if snake_walk: + downgraded = self._migrate_down( + engine, version - 1, with_data=True) + if downgraded: + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. + for version in reversed(versions): + # downgrade -> upgrade -> downgrade + downgraded = self._migrate_down(engine, version - 1) + + if snake_walk and downgraded: + self._migrate_up(engine, version) + self._migrate_down(engine, version - 1) + + def _migrate_down(self, engine, version, with_data=False): + try: + self.migration_api.downgrade(engine, self.REPOSITORY, version) + except NotImplementedError: + # NOTE(sirp): some migrations, namely release-level + # migrations, don't support a downgrade. + return False + + self.assertEqual( + version, self.migration_api.db_version(engine, self.REPOSITORY)) + + # NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target' + # version). So if we have any downgrade checks, they need to be run for + # the previous (higher numbered) migration. + if with_data: + post_downgrade = getattr( + self, "_post_downgrade_%03d" % (version + 1), None) + if post_downgrade: + post_downgrade(engine) + + return True + + def _migrate_up(self, engine, version, with_data=False): + """migrate up to a new version of the db. + + We allow for data insertion and post checks at every + migration version with special _pre_upgrade_### and + _check_### functions in the main test. + """ + # NOTE(sdague): try block is here because it's impossible to debug + # where a failed data migration happens otherwise + try: + if with_data: + data = None + pre_upgrade = getattr( + self, "_pre_upgrade_%03d" % version, None) + if pre_upgrade: + data = pre_upgrade(engine) + + self.migration_api.upgrade(engine, self.REPOSITORY, version) + self.assertEqual(version, + self.migration_api.db_version(engine, + self.REPOSITORY)) + if with_data: + check = getattr(self, "_check_%03d" % version, None) + if check: + check(engine, data) + except Exception: + LOG.error("Failed to migrate to version %s on engine %s" % + (version, engine)) + raise diff --git a/gceapi/openstack/common/db/sqlalchemy/utils.py b/gceapi/openstack/common/db/sqlalchemy/utils.py new file mode 100644 index 0000000..c5386aa --- /dev/null +++ b/gceapi/openstack/common/db/sqlalchemy/utils.py @@ -0,0 +1,548 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation. +# Copyright 2012 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from migrate.changeset import UniqueConstraint
+import sqlalchemy
+from sqlalchemy import Boolean
+from sqlalchemy import CheckConstraint
+from sqlalchemy import Column
+from sqlalchemy.engine import reflection
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import func
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.sql import select
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy.types import NullType
+
+from gceapi.openstack.common.gettextutils import _
+
+from gceapi.openstack.common import log as logging
+from gceapi.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+
+_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
+
+
+def sanitize_db_url(url):
+    match = _DBURL_REGEX.match(url)
+    if match:
+        return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
+    return url
+
+
+class InvalidSortKey(Exception):
+    message = _("Sort key supplied was not valid.")
+
+
+# copy from glance/db/sqlalchemy/api.py
+def paginate_query(query, model, limit, sort_keys, marker=None,
+                   sort_dir=None, sort_dirs=None):
+    """Returns a query with sorting / pagination criteria added.
+
+    Pagination works by requiring a unique sort_key, specified by sort_keys.
+    (If sort_keys is not unique, then we risk looping through values.)
+    We use the last row in the previous page as the 'marker' for pagination.
+    So we must return values that follow the passed marker in the order.
+    With a single-valued sort_key, this would be easy: sort_key > X.
+    With a compound-values sort_key (k1, k2, k3), we must do this to repeat
+    the lexicographical ordering:
+    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
+
+    We also have to cope with different sort_directions.
+
+    Typically, the id of the last row is used as the client-facing pagination
+    marker, then the actual marker object must be fetched from the db and
+    passed in to us as marker.
+
+    :param query: the query object to which we should add paging/sorting
+    :param model: the ORM model class
+    :param limit: maximum number of items to return
+    :param sort_keys: array of attributes by which results should be sorted
+    :param marker: the last item of the previous page; we return the next
+                   results after this value.
+    :param sort_dir: direction in which results should be sorted (asc, desc)
+    :param sort_dirs: per-column array of sort_dirs, corresponding to
+                      sort_keys
+
+    :rtype: sqlalchemy.orm.query.Query
+    :return: The query with sorting/pagination added.
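+
+    Example (illustrative sketch; ``Instance`` and ``session`` stand in
+    for an ORM model and an active session, and ``last_seen_id`` for the
+    marker id; none of these names are defined in this patch)::
+
+        marker = session.query(Instance).get(last_seen_id)
+        page = paginate_query(session.query(Instance), Instance, 100,
+                              ['display_name', 'id'], marker=marker,
+                              sort_dir='asc')
+        instances = page.all()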
+ """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming its id + LOG.warning(_('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + try: + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + except KeyError: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise InvalidSortKey() + query = query.order_by(sort_dir_func(sort_key_attr)) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(sort_keys)): + crit_attrs = [] + for j in range(i): + model_attr = getattr(model, sort_keys[j]) + crit_attrs.append((model_attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + else: + crit_attrs.append((model_attr > marker_values[i])) + + criteria = sqlalchemy.sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sqlalchemy.sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query + + +def get_table(engine, name): + """Returns an sqlalchemy table dynamically from db. + + Needed because the models don't work for us in migrations + as models will be far out of sync with the current data. + """ + metadata = MetaData() + metadata.bind = engine + return Table(name, metadata, autoload=True) + + +class InsertFromSelect(UpdateBase): + """Form the base for `INSERT INTO table (SELECT ... )` statement.""" + def __init__(self, table, select): + self.table = table + self.select = select + + +@compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + """Form the `INSERT INTO table (SELECT ... )` statement.""" + return "INSERT INTO %s %s" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select)) + + +class ColumnError(Exception): + """Error raised when no column or an invalid column is found.""" + + +def _get_not_supported_column(col_name_col_instance, column_name): + try: + column = col_name_col_instance[column_name] + except KeyError: + msg = _("Please specify column %s in col_name_col_instance " + "param. It is required because column has unsupported " + "type by sqlite).") + raise ColumnError(msg % column_name) + + if not isinstance(column, Column): + msg = _("col_name_col_instance param has wrong type of " + "column instance for column %s It should be instance " + "of sqlalchemy.Column.") + raise ColumnError(msg % column_name) + return column + + +def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns, + **col_name_col_instance): + """Drop unique constraint from table. + + This method drops UC from table and works for mysql, postgresql and sqlite. + In mysql and postgresql we are able to use "alter table" construction. 
+    Sqlalchemy doesn't support some sqlite column types and replaces their
+    type with NullType in metadata. We process these columns and replace
+    NullType with the correct column type.
+
+    :param migrate_engine: sqlalchemy engine
+    :param table_name: name of table that contains uniq constraint.
+    :param uc_name: name of uniq constraint that will be dropped.
+    :param columns: columns that are in uniq constraint.
+    :param col_name_col_instance: contains pair column_name=column_instance.
+                      column_instance is instance of Column. These params
+                      are required only for columns that have unsupported
+                      types by sqlite. For example BigInteger.
+    """
+
+    meta = MetaData()
+    meta.bind = migrate_engine
+    t = Table(table_name, meta, autoload=True)
+
+    if migrate_engine.name == "sqlite":
+        override_cols = [
+            _get_not_supported_column(col_name_col_instance, col.name)
+            for col in t.columns
+            if isinstance(col.type, NullType)
+        ]
+        for col in override_cols:
+            t.columns.replace(col)
+
+    uc = UniqueConstraint(*columns, table=t, name=uc_name)
+    uc.drop()
+
+
+def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
+                                          use_soft_delete, *uc_column_names):
+    """Drop all old rows having the same values for columns in uc_columns.
+
+    This method drops (or marks as `deleted`, if use_soft_delete is True)
+    old duplicate rows from the table with name `table_name`.
+
+    :param migrate_engine: Sqlalchemy engine
+    :param table_name: Table with duplicates
+    :param use_soft_delete: If True - values will be marked as `deleted`,
+                            if False - values will be removed from table
+    :param uc_column_names: Unique constraint columns
+    """
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table(table_name, meta, autoload=True)
+    columns_for_group_by = [table.c[name] for name in uc_column_names]
+
+    columns_for_select = [func.max(table.c.id)]
+    columns_for_select.extend(columns_for_group_by)
+
+    duplicated_rows_select = select(columns_for_select,
+                                    group_by=columns_for_group_by,
+                                    having=func.count(table.c.id) > 1)
+
+    for row in migrate_engine.execute(duplicated_rows_select):
+        # NOTE(boris-42): Do not remove row that has the biggest ID.
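+        # For example, for duplicates (id=3, name='x') and (id=7, name='x')
+        # the select above yields (7, 'x'), so the condition built below
+        # matches only id=3 for deletion (or soft-deletion).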
+ delete_condition = table.c.id != row[0] + is_none = None # workaround for pyflakes + delete_condition &= table.c.deleted_at == is_none + for name in uc_column_names: + delete_condition &= table.c[name] == row[name] + + rows_to_delete_select = select([table.c.id]).where(delete_condition) + for row in migrate_engine.execute(rows_to_delete_select).fetchall(): + LOG.info(_("Deleting duplicated row with id: %(id)s from table: " + "%(table)s") % dict(id=row[0], table=table_name)) + + if use_soft_delete: + delete_statement = table.update().\ + where(delete_condition).\ + values({ + 'deleted': literal_column('id'), + 'updated_at': literal_column('updated_at'), + 'deleted_at': timeutils.utcnow() + }) + else: + delete_statement = table.delete().where(delete_condition) + migrate_engine.execute(delete_statement) + + +def _get_default_deleted_value(table): + if isinstance(table.c.id.type, Integer): + return 0 + if isinstance(table.c.id.type, String): + return "" + raise ColumnError(_("Unsupported id columns type")) + + +def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): + table = get_table(migrate_engine, table_name) + + insp = reflection.Inspector.from_engine(migrate_engine) + real_indexes = insp.get_indexes(table_name) + existing_index_names = dict( + [(index['name'], index['column_names']) for index in real_indexes]) + + # NOTE(boris-42): Restore indexes on `deleted` column + for index in indexes: + if 'deleted' not in index['column_names']: + continue + name = index['name'] + if name in existing_index_names: + column_names = [table.c[c] for c in existing_index_names[name]] + old_index = Index(name, *column_names, unique=index["unique"]) + old_index.drop(migrate_engine) + + column_names = [table.c[c] for c in index['column_names']] + new_index = Index(index["name"], *column_names, unique=index["unique"]) + new_index.create(migrate_engine) + + +def change_deleted_column_type_to_boolean(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_boolean_sqlite( + migrate_engine, table_name, **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + old_deleted = Column('old_deleted', Boolean, default=False) + old_deleted.create(table, populate_default=False) + + table.update().\ + where(table.c.deleted == table.c.id).\ + values(old_deleted=True).\ + execute() + + table.c.deleted.drop() + table.c.old_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, + **col_name_col_instance): + insp = reflection.Inspector.from_engine(migrate_engine) + table = get_table(migrate_engine, table_name) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', Boolean, default=0) + columns.append(column_copy) + + constraints = [constraint.copy() for constraint in table.constraints] + + meta = table.metadata + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + 
indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) + + ins = InsertFromSelect(new_table, select(c_select)) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == new_table.c.id).\ + values(deleted=True).\ + execute() + + +def change_deleted_column_type_to_id_type(migrate_engine, table_name, + **col_name_col_instance): + if migrate_engine.name == "sqlite": + return _change_deleted_column_type_to_id_type_sqlite( + migrate_engine, table_name, **col_name_col_instance) + insp = reflection.Inspector.from_engine(migrate_engine) + indexes = insp.get_indexes(table_name) + + table = get_table(migrate_engine, table_name) + + new_deleted = Column('new_deleted', table.c.id.type, + default=_get_default_deleted_value(table)) + new_deleted.create(table, populate_default=True) + + deleted = True # workaround for pyflakes + table.update().\ + where(table.c.deleted == deleted).\ + values(new_deleted=table.c.id).\ + execute() + table.c.deleted.drop() + table.c.new_deleted.alter(name="deleted") + + _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) + + +def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name, + **col_name_col_instance): + # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check + # constraints in sqlite DB and our `deleted` column has + # 2 check constraints. So there is only one way to remove + # these constraints: + # 1) Create new table with the same columns, constraints + # and indexes. (except deleted column). + # 2) Copy all data from old to new table. + # 3) Drop old table. + # 4) Rename new table to old table name. + insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData(bind=migrate_engine) + table = Table(table_name, meta, autoload=True) + default_deleted_value = _get_default_deleted_value(table) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = _get_not_supported_column(col_name_col_instance, + column.name) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', table.c.id.type, + default=default_deleted_value) + columns.append(column_copy) + + def is_deleted_column_constraint(constraint): + # NOTE(boris-42): There is no other way to check is CheckConstraint + # associated with deleted column. 
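+        # (Of the two sqltext forms tested below, the first matches a
+        # constraint reflected from an existing sqlite schema, the second
+        # one built in metadata by sqlalchemy itself.)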
+ if not isinstance(constraint, CheckConstraint): + return False + sqltext = str(constraint.sqltext) + return (sqltext.endswith("deleted in (0, 1)") or + sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) + + constraints = [] + for constraint in table.constraints: + if not is_deleted_column_constraint(constraint): + constraints.append(constraint.copy()) + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], *column_names, + unique=index["unique"])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + deleted = True # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=new_table.c.id).\ + execute() + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. + deleted = False # workaround for pyflakes + new_table.update().\ + where(new_table.c.deleted == deleted).\ + values(deleted=default_deleted_value).\ + execute() + + +def get_connect_string(backend, database, user=None, passwd=None): + """Get database connection + + Try to get a connection with a very specific set of values, if we get + these then we'll run the tests, otherwise they are skipped + """ + args = {'backend': backend, + 'user': user, + 'passwd': passwd, + 'database': database} + if backend == 'sqlite': + template = '%(backend)s:///%(database)s' + else: + template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" + return template % args + + +def is_backend_avail(backend, database, user=None, passwd=None): + try: + connect_uri = get_connect_string(backend=backend, + database=database, + user=user, + passwd=passwd) + engine = sqlalchemy.create_engine(connect_uri) + connection = engine.connect() + except Exception: + # intentionally catch all to handle exceptions even if we don't + # have any backend code loaded. + return False + else: + connection.close() + engine.dispose() + return True + + +def get_db_connection_info(conn_pieces): + database = conn_pieces.path.strip('/') + loc_pieces = conn_pieces.netloc.split('@') + host = loc_pieces[1] + + auth_pieces = loc_pieces[0].split(':') + user = auth_pieces[0] + password = "" + if len(auth_pieces) > 1: + password = auth_pieces[1].strip() + + return (user, password, database, host) diff --git a/gceapi/openstack/common/eventlet_backdoor.py b/gceapi/openstack/common/eventlet_backdoor.py new file mode 100644 index 0000000..136cbd7 --- /dev/null +++ b/gceapi/openstack/common/eventlet_backdoor.py @@ -0,0 +1,144 @@ +# Copyright (c) 2012 OpenStack Foundation. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
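+
+# (This module provides an optional eventlet "backdoor": a Python REPL
+# reachable through a localhost TCP socket, for inspecting greenthreads
+# and objects in a running service; see initialize_if_enabled() below.)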
+
+from __future__ import print_function
+
+import errno
+import gc
+import os
+import pprint
+import socket
+import sys
+import traceback
+
+import eventlet
+import eventlet.backdoor
+import greenlet
+from oslo.config import cfg
+
+from gceapi.openstack.common.gettextutils import _
+from gceapi.openstack.common import log as logging
+
+help_for_backdoor_port = (
+    "Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
+    "in listening on a random tcp port number; <port> results in listening "
+    "on the specified port number (and not enabling backdoor if that port "
+    "is in use); and <start>:<end> results in listening on the smallest "
+    "unused port number within the specified range of port numbers. The "
+    "chosen port is displayed in the service's log file.")
+eventlet_backdoor_opts = [
+    cfg.StrOpt('backdoor_port',
+               default=None,
+               help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
+]
+
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+class EventletBackdoorConfigValueError(Exception):
+    def __init__(self, port_range, help_msg, ex):
+        msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+               '%(help)s' %
+               {'range': port_range, 'ex': ex, 'help': help_msg})
+        super(EventletBackdoorConfigValueError, self).__init__(msg)
+        self.port_range = port_range
+
+
+def _dont_use_this():
+    print("Don't use this, just disconnect instead")
+
+
+def _find_objects(t):
+    return [o for o in gc.get_objects() if isinstance(o, t)]
+
+
+def _print_greenthreads():
+    for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+        print(i, gt)
+        traceback.print_stack(gt.gr_frame)
+        print()
+
+
+def _print_nativethreads():
+    for threadId, stack in sys._current_frames().items():
+        print(threadId)
+        traceback.print_stack(stack)
+        print()
+
+
+def _parse_port_range(port_range):
+    if ':' not in port_range:
+        start, end = port_range, port_range
+    else:
+        start, end = port_range.split(':', 1)
+    try:
+        start, end = int(start), int(end)
+        if end < start:
+            raise ValueError
+        return start, end
+    except ValueError as ex:
+        # Pass the help text and the exception in the order the
+        # constructor declares them.
+        raise EventletBackdoorConfigValueError(
+            port_range, help_for_backdoor_port, ex)
+
+
+def _listen(host, start_port, end_port, listen_func):
+    try_port = start_port
+    while True:
+        try:
+            return listen_func((host, try_port))
+        except socket.error as exc:
+            if (exc.errno != errno.EADDRINUSE or
+                    try_port >= end_port):
+                raise
+            try_port += 1
+
+
+def initialize_if_enabled():
+    backdoor_locals = {
+        'exit': _dont_use_this,      # So we don't exit the entire process
+        'quit': _dont_use_this,      # So we don't exit the entire process
+        'fo': _find_objects,
+        'pgt': _print_greenthreads,
+        'pnt': _print_nativethreads,
+    }
+
+    if CONF.backdoor_port is None:
+        return None
+
+    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
+    # NOTE(johannes): The standard sys.displayhook will print the value of
+    # the last expression and set it to __builtin__._, which overwrites
+    # the __builtin__._ that gettext sets. Let's switch to using pprint
+    # since it won't interact poorly with gettext, and it's easier to
+    # read the output too.
+    def displayhook(val):
+        if val is not None:
+            pprint.pprint(val)
+    sys.displayhook = displayhook
+
+    sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+    # In the case of backdoor port being zero, a port number is assigned by
+    # listen(). In any case, pull the port number out here.
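+    # For example, backdoor_port = "0" binds a random free port, while
+    # "8000:9000" binds the first unused port in that range (see
+    # _parse_port_range and _listen above); getsockname() reports
+    # whichever port was actually chosen.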
+ port = sock.getsockname()[1] + LOG.info(_('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()}) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/gceapi/openstack/common/excutils.py b/gceapi/openstack/common/excutils.py new file mode 100644 index 0000000..5a7cf14 --- /dev/null +++ b/gceapi/openstack/common/excutils.py @@ -0,0 +1,99 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception related utilities. +""" + +import logging +import sys +import time +import traceback + +import six + +from gceapi.openstack.common.gettextutils import _ + + +class save_and_reraise_exception(object): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be re-raised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is re-raised. + + In some cases the caller may not want to re-raise the exception, and + for those circumstances this context provides a reraise flag that + can be used to suppress the exception. For example:: + + except Exception: + with save_and_reraise_exception() as ctxt: + decide_if_need_reraise() + if not should_be_reraised: + ctxt.reraise = False + """ + def __init__(self): + self.reraise = True + + def __enter__(self): + self.type_, self.value, self.tb, = sys.exc_info() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + logging.error(_('Original exception being dropped: %s'), + traceback.format_exception(self.type_, + self.value, + self.tb)) + return False + if self.reraise: + six.reraise(self.type_, self.value, self.tb) + + +def forever_retry_uncaught_exceptions(infunc): + def inner_func(*args, **kwargs): + last_log_time = 0 + last_exc_message = None + exc_count = 0 + while True: + try: + return infunc(*args, **kwargs) + except Exception as exc: + this_exc_message = six.u(str(exc)) + if this_exc_message == last_exc_message: + exc_count += 1 + else: + exc_count = 1 + # Do not log any more frequently than once a minute unless + # the exception message changes + cur_time = int(time.time()) + if (cur_time - last_log_time > 60 or + this_exc_message != last_exc_message): + logging.exception( + _('Unexpected exception occurred %d time(s)... ' + 'retrying.') % exc_count) + last_log_time = cur_time + last_exc_message = this_exc_message + exc_count = 0 + # This should be a very rare event. In case it isn't, do + # a sleep. 
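+                # (Without this sleep, a function that fails immediately
+                # would retry in a tight loop, burning CPU between the
+                # once-a-minute log messages above.)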
+ time.sleep(1) + return inner_func diff --git a/gceapi/openstack/common/gettextutils.py b/gceapi/openstack/common/gettextutils.py new file mode 100644 index 0000000..b5c245f --- /dev/null +++ b/gceapi/openstack/common/gettextutils.py @@ -0,0 +1,440 @@ +# Copyright 2012 Red Hat, Inc. +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +gettext for openstack-common modules. + +Usual usage in an openstack.common module: + + from gceapi.openstack.common.gettextutils import _ +""" + +import copy +import gettext +import locale +from logging import handlers +import os +import re + +from babel import localedata +import six + +_localedir = os.environ.get('gceapi'.upper() + '_LOCALEDIR') +_t = gettext.translation('gceapi', localedir=_localedir, fallback=True) + +_AVAILABLE_LANGUAGES = {} +USE_LAZY = False + + +def enable_lazy(): + """Convenience function for configuring _() to use lazy gettext + + Call this at the start of execution to enable the gettextutils._ + function to use lazy gettext functionality. This is useful if + your project is importing _ directly instead of using the + gettextutils.install() way of importing the _ function. + """ + global USE_LAZY + USE_LAZY = True + + +def _(msg): + if USE_LAZY: + return Message(msg, domain='gceapi') + else: + if six.PY3: + return _t.gettext(msg) + return _t.ugettext(msg) + + +def install(domain, lazy=False): + """Install a _() function using the given translation domain. + + Given a translation domain, install a _() function using gettext's + install() function. + + The main difference from gettext.install() is that we allow + overriding the default localedir (e.g. /usr/share/locale) using + a translation-domain-specific environment variable (e.g. + NOVA_LOCALEDIR). + + :param domain: the translation domain + :param lazy: indicates whether or not to install the lazy _() function. + The lazy _() introduces a way to do deferred translation + of messages by installing a _ that builds Message objects, + instead of strings, which can then be lazily translated into + any available locale. + """ + if lazy: + # NOTE(mrodden): Lazy gettext functionality. + # + # The following introduces a deferred way to do translations on + # messages in OpenStack. We override the standard _() function + # and % (format string) operation to build Message objects that can + # later be translated when we have more information. + def _lazy_gettext(msg): + """Create and return a Message object. + + Lazy gettext function for a given domain, it is a factory method + for a project/module to get a lazy gettext function for its own + translation domain (i.e. nova, glance, cinder, etc.) + + Message encapsulates a string so that we can translate + it later when needed. 
+ """ + return Message(msg, domain=domain) + + from six import moves + moves.builtins.__dict__['_'] = _lazy_gettext + else: + localedir = '%s_LOCALEDIR' % domain.upper() + if six.PY3: + gettext.install(domain, + localedir=os.environ.get(localedir)) + else: + gettext.install(domain, + localedir=os.environ.get(localedir), + unicode=True) + + +class Message(six.text_type): + """A Message object is a unicode object that can be translated. + + Translation of Message is done explicitly using the translate() method. + For all non-translation intents and purposes, a Message is simply unicode, + and can be treated as such. + """ + + def __new__(cls, msgid, msgtext=None, params=None, + domain='gceapi', *args): + """Create a new Message object. + + In order for translation to work gettext requires a message ID, this + msgid will be used as the base unicode text. It is also possible + for the msgid and the base unicode text to be different by passing + the msgtext parameter. + """ + # If the base msgtext is not given, we use the default translation + # of the msgid (which is in English) just in case the system locale is + # not English, so that the base text will be in that locale by default. + if not msgtext: + msgtext = Message._translate_msgid(msgid, domain) + # We want to initialize the parent unicode with the actual object that + # would have been plain unicode if 'Message' was not enabled. + msg = super(Message, cls).__new__(cls, msgtext) + msg.msgid = msgid + msg.domain = domain + msg.params = params + return msg + + def translate(self, desired_locale=None): + """Translate this message to the desired locale. + + :param desired_locale: The desired locale to translate the message to, + if no locale is provided the message will be + translated to the system's default locale. + + :returns: the translated message in unicode + """ + + translated_message = Message._translate_msgid(self.msgid, + self.domain, + desired_locale) + if self.params is None: + # No need for more translation + return translated_message + + # This Message object may have been formatted with one or more + # Message objects as substitution arguments, given either as a single + # argument, part of a tuple, or as one or more values in a dictionary. + # When translating this Message we need to translate those Messages too + translated_params = _translate_args(self.params, desired_locale) + + translated_message = translated_message % translated_params + + return translated_message + + @staticmethod + def _translate_msgid(msgid, domain, desired_locale=None): + if not desired_locale: + system_locale = locale.getdefaultlocale() + # If the system locale is not available to the runtime use English + if not system_locale[0]: + desired_locale = 'en_US' + else: + desired_locale = system_locale[0] + + locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR') + lang = gettext.translation(domain, + localedir=locale_dir, + languages=[desired_locale], + fallback=True) + if six.PY3: + translator = lang.gettext + else: + translator = lang.ugettext + + translated_message = translator(msgid) + return translated_message + + def __mod__(self, other): + # When we mod a Message we want the actual operation to be performed + # by the parent class (i.e. 
unicode()), the only thing we do here is + # save the original msgid and the parameters in case of a translation + params = self._sanitize_mod_params(other) + unicode_mod = super(Message, self).__mod__(params) + modded = Message(self.msgid, + msgtext=unicode_mod, + params=params, + domain=self.domain) + return modded + + def _sanitize_mod_params(self, other): + """Sanitize the object being modded with this Message. + + - Add support for modding 'None' so translation supports it + - Trim the modded object, which can be a large dictionary, to only + those keys that would actually be used in a translation + - Snapshot the object being modded, in case the message is + translated, it will be used as it was when the Message was created + """ + if other is None: + params = (other,) + elif isinstance(other, dict): + params = self._trim_dictionary_parameters(other) + else: + params = self._copy_param(other) + return params + + def _trim_dictionary_parameters(self, dict_param): + """Return a dict that only has matching entries in the msgid.""" + # NOTE(luisg): Here we trim down the dictionary passed as parameters + # to avoid carrying a lot of unnecessary weight around in the message + # object, for example if someone passes in Message() % locals() but + # only some params are used, and additionally we prevent errors for + # non-deepcopyable objects by unicoding() them. + + # Look for %(param) keys in msgid; + # Skip %% and deal with the case where % is first character on the line + keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid) + + # If we don't find any %(param) keys but have a %s + if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid): + # Apparently the full dictionary is the parameter + params = self._copy_param(dict_param) + else: + params = {} + # Save our existing parameters as defaults to protect + # ourselves from losing values if we are called through an + # (erroneous) chain that builds a valid Message with + # arguments, and then does something like "msg % kwds" + # where kwds is an empty dictionary. + src = {} + if isinstance(self.params, dict): + src.update(self.params) + src.update(dict_param) + for key in keys: + params[key] = self._copy_param(src[key]) + + return params + + def _copy_param(self, param): + try: + return copy.deepcopy(param) + except TypeError: + # Fallback to casting to unicode this will handle the + # python code-like objects that can't be deep-copied + return six.text_type(param) + + def __add__(self, other): + msg = _('Message objects do not support addition.') + raise TypeError(msg) + + def __radd__(self, other): + return self.__add__(other) + + def __str__(self): + # NOTE(luisg): Logging in python 2.6 tries to str() log records, + # and it expects specifically a UnicodeError in order to proceed. + msg = _('Message objects do not support str() because they may ' + 'contain non-ascii characters. ' + 'Please use unicode() or translate() instead.') + raise UnicodeError(msg) + + +def get_available_languages(domain): + """Lists the available languages for the given translation domain. 
+ + :param domain: the domain to get languages for + """ + if domain in _AVAILABLE_LANGUAGES: + return copy.copy(_AVAILABLE_LANGUAGES[domain]) + + localedir = '%s_LOCALEDIR' % domain.upper() + find = lambda x: gettext.find(domain, + localedir=os.environ.get(localedir), + languages=[x]) + + # NOTE(mrodden): en_US should always be available (and first in case + # order matters) since our in-line message strings are en_US + language_list = ['en_US'] + # NOTE(luisg): Babel <1.0 used a function called list(), which was + # renamed to locale_identifiers() in >=1.0, the requirements master list + # requires >=0.9.6, uncapped, so defensively work with both. We can remove + # this check when the master list updates to >=1.0, and update all projects + list_identifiers = (getattr(localedata, 'list', None) or + getattr(localedata, 'locale_identifiers')) + locale_identifiers = list_identifiers() + + for i in locale_identifiers: + if find(i) is not None: + language_list.append(i) + + # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported + # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they + # are perfectly legitimate locales: + # https://github.com/mitsuhiko/babel/issues/37 + # In Babel 1.3 they fixed the bug and they support these locales, but + # they are still not explicitly "listed" by locale_identifiers(). + # That is why we add the locales here explicitly if necessary so that + # they are listed as supported. + aliases = {'zh': 'zh_CN', + 'zh_Hant_HK': 'zh_HK', + 'zh_Hant': 'zh_TW', + 'fil': 'tl_PH'} + for (locale, alias) in six.iteritems(aliases): + if locale in language_list and alias not in language_list: + language_list.append(alias) + + _AVAILABLE_LANGUAGES[domain] = language_list + return copy.copy(language_list) + + +def translate(obj, desired_locale=None): + """Gets the translated unicode representation of the given object. + + If the object is not translatable it is returned as-is. + If the locale is None the object is translated to the system locale. + + :param obj: the object to translate + :param desired_locale: the locale to translate the message to, if None the + default system locale will be used + :returns: the translated object in unicode, or the original object if + it could not be translated + """ + message = obj + if not isinstance(message, Message): + # If the object to translate is not already translatable, + # let's first get its unicode representation + message = six.text_type(obj) + if isinstance(message, Message): + # Even after unicoding() we still need to check if we are + # running with translatable unicode before translating + return message.translate(desired_locale) + return obj + + +def _translate_args(args, desired_locale=None): + """Translates all the translatable elements of the given arguments object. + + This method is used for translating the translatable values in method + arguments which include values of tuples or dictionaries. + If the object is not a tuple or a dictionary the object itself is + translated if it is translatable. + + If the locale is None the object is translated to the system locale. 
+ + :param args: the args to translate + :param desired_locale: the locale to translate the args to, if None the + default system locale will be used + :returns: a new args object with the translated contents of the original + """ + if isinstance(args, tuple): + return tuple(translate(v, desired_locale) for v in args) + if isinstance(args, dict): + translated_dict = {} + for (k, v) in six.iteritems(args): + translated_v = translate(v, desired_locale) + translated_dict[k] = translated_v + return translated_dict + return translate(args, desired_locale) + + +class TranslationHandler(handlers.MemoryHandler): + """Handler that translates records before logging them. + + The TranslationHandler takes a locale and a target logging.Handler object + to forward LogRecord objects to after translating them. This handler + depends on Message objects being logged, instead of regular strings. + + The handler can be configured declaratively in the logging.conf as follows: + + [handlers] + keys = translatedlog, translator + + [handler_translatedlog] + class = handlers.WatchedFileHandler + args = ('/var/log/api-localized.log',) + formatter = context + + [handler_translator] + class = openstack.common.log.TranslationHandler + target = translatedlog + args = ('zh_CN',) + + If the specified locale is not available in the system, the handler will + log in the default locale. + """ + + def __init__(self, locale=None, target=None): + """Initialize a TranslationHandler + + :param locale: locale to use for translating messages + :param target: logging.Handler object to forward + LogRecord objects to after translation + """ + # NOTE(luisg): In order to allow this handler to be a wrapper for + # other handlers, such as a FileHandler, and still be able to + # configure it using logging.conf, this handler has to extend + # MemoryHandler because only the MemoryHandlers' logging.conf + # parsing is implemented such that it accepts a target handler. + handlers.MemoryHandler.__init__(self, capacity=0, target=target) + self.locale = locale + + def setFormatter(self, fmt): + self.target.setFormatter(fmt) + + def emit(self, record): + # We save the message from the original record to restore it + # after translation, so other handlers are not affected by this + original_msg = record.msg + original_args = record.args + + try: + self._translate_and_log_record(record) + finally: + record.msg = original_msg + record.args = original_args + + def _translate_and_log_record(self, record): + record.msg = translate(record.msg, self.locale) + + # In addition to translating the message, we also need to translate + # arguments that were passed to the log method that were not part + # of the main message e.g., log.info(_('Some message %s'), this_one)) + record.args = _translate_args(record.args, self.locale) + + self.target.emit(record) diff --git a/gceapi/openstack/common/importutils.py b/gceapi/openstack/common/importutils.py new file mode 100644 index 0000000..4fd9ae2 --- /dev/null +++ b/gceapi/openstack/common/importutils.py @@ -0,0 +1,66 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+    """Returns a class from a string including module and class."""
+    mod_str, _sep, class_str = import_str.rpartition('.')
+    try:
+        __import__(mod_str)
+        return getattr(sys.modules[mod_str], class_str)
+    except (ValueError, AttributeError):
+        raise ImportError('Class %s cannot be found (%s)' %
+                          (class_str,
+                           traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+    """Import a class and return an instance of it."""
+    return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+    """Tries to import object from default namespace.
+
+    Imports a class and returns an instance of it, first trying to find
+    the class in a default namespace, then falling back to a full path
+    if it is not found in the default namespace.
+    """
+    import_value = "%s.%s" % (name_space, import_str)
+    try:
+        return import_class(import_value)(*args, **kwargs)
+    except ImportError:
+        return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+    """Import a module."""
+    __import__(import_str)
+    return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+    """Try to import a module and if it fails return default."""
+    try:
+        return import_module(import_str)
+    except ImportError:
+        return default
diff --git a/gceapi/openstack/common/jsonutils.py b/gceapi/openstack/common/jsonutils.py
new file mode 100644
index 0000000..a4ab5b3
--- /dev/null
+++ b/gceapi/openstack/common/jsonutils.py
@@ -0,0 +1,182 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+JSON related utilities.
+
+This module provides a few things:
+
+    1) A handy function for getting an object down to something that can be
+    JSON serialized. See to_primitive().
+
+    2) Wrappers around loads() and dumps(). The dumps() wrapper will
+    automatically use to_primitive() for you if needed.
+
+    3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
+    is available.
+'''
+
+
+import datetime
+import functools
+import inspect
+import itertools
+import json
+try:
+    import xmlrpclib
+except ImportError:
+    # NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
+    #                 however the function and object call signatures
+    #                 remained the same.
This whole try/except block should + # be removed and replaced with a call to six.moves once + # six 1.4.2 is released. See http://bit.ly/1bqrVzu + import xmlrpc.client as xmlrpclib + +import six + +from gceapi.openstack.common import gettextutils +from gceapi.openstack.common import importutils +from gceapi.openstack.common import timeutils + +netaddr = importutils.try_import("netaddr") + +_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + +_simple_types = (six.string_types + six.integer_types + + (type(None), bool, float)) + + +def to_primitive(value, convert_instances=False, convert_datetime=True, + level=0, max_depth=3): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + # handle obvious types first - order of basic types determined by running + # full tests on nova project, resulting in the following counts: + # 572754 + # 460353 + # 379632 + # 274610 + # 199918 + # 114200 + # 51817 + # 26164 + # 6491 + # 283 + # 19 + if isinstance(value, _simple_types): + return value + + if isinstance(value, datetime.datetime): + if convert_datetime: + return timeutils.strtime(value) + else: + return value + + # value of itertools.count doesn't get caught by nasty_type_tests + # and results in infinite loop when list(value) is called. + if type(value) == itertools.count: + return six.text_type(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > max_depth: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + recursive = functools.partial(to_primitive, + convert_instances=convert_instances, + convert_datetime=convert_datetime, + level=level, + max_depth=max_depth) + if isinstance(value, dict): + return dict((k, recursive(v)) for k, v in six.iteritems(value)) + elif isinstance(value, (list, tuple)): + return [recursive(lv) for lv in value] + + # It's not clear why xmlrpclib created their own DateTime type, but + # for our purposes, make it a datetime type which is explicitly + # handled + if isinstance(value, xmlrpclib.DateTime): + value = datetime.datetime(*tuple(value.timetuple())[:6]) + + if convert_datetime and isinstance(value, datetime.datetime): + return timeutils.strtime(value) + elif isinstance(value, gettextutils.Message): + return value.data + elif hasattr(value, 'iteritems'): + return recursive(dict(value.iteritems()), level=level + 1) + elif hasattr(value, '__iter__'): + return recursive(list(value)) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. 
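+            # (each recursive call bumps `level`, so cyclic structures
+            # bottom out at max_depth and come back as '?')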
+ return recursive(value.__dict__, level=level + 1) + elif netaddr and isinstance(value, netaddr.IPAddress): + return six.text_type(value) + else: + if any(test(value) for test in _nasty_type_tests): + return six.text_type(value) + return value + except TypeError: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return six.text_type(value) + + +def dumps(value, default=to_primitive, **kwargs): + return json.dumps(value, default=default, **kwargs) + + +def loads(s): + return json.loads(s) + + +def load(s): + return json.load(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append((__name__, 'dumps', TypeError, + 'loads', ValueError, 'load')) + anyjson.force_implementation(__name__) diff --git a/gceapi/openstack/common/local.py b/gceapi/openstack/common/local.py new file mode 100644 index 0000000..0819d5b --- /dev/null +++ b/gceapi/openstack/common/local.py @@ -0,0 +1,45 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Local storage of variables using weak references""" + +import threading +import weakref + + +class WeakLocal(threading.local): + def __getattribute__(self, attr): + rval = super(WeakLocal, self).__getattribute__(attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return super(WeakLocal, self).__setattr__(attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = threading.local() diff --git a/gceapi/openstack/common/log.py b/gceapi/openstack/common/log.py new file mode 100644 index 0000000..cd3769e --- /dev/null +++ b/gceapi/openstack/common/log.py @@ -0,0 +1,657 @@ +# Copyright 2011 OpenStack Foundation. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
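
Stepping back to jsonutils above for a moment: a minimal sketch of what
to_primitive() does with awkward inputs (illustrative only; it assumes the
gceapi tree is importable):

    import datetime

    from gceapi.openstack.common import jsonutils

    class Flavor(object):
        def __init__(self):
            self.name = 'm1.tiny'
            self.ram = 512

    # Datetimes are rendered via timeutils.strtime() by the dumps() wrapper.
    print(jsonutils.dumps({'at': datetime.datetime(2014, 2, 1, 16, 59)}))
    # {"at": "2014-02-01T16:59:00.000000"}

    # Instances are only unpacked on request, and lossily (see the docstring).
    print(jsonutils.to_primitive(Flavor(), convert_instances=True))
    # {'name': 'm1.tiny', 'ram': 512}  (key order may vary)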
+ +"""Openstack logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through conf. + +""" + +import inspect +import itertools +import logging +import logging.config +import logging.handlers +import os +import re +import sys +import traceback + +from oslo.config import cfg +import six +from six import moves + +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import importutils +from gceapi.openstack.common import jsonutils +from gceapi.openstack.common import local + + +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS = [] +_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS.append(reg_ex) + + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config-append', + metavar='PATH', + deprecated_name='log-config', + help='The name of logging configuration file. It does not ' + 'disable existing loggers, but just appends specified ' + 'logging configuration to any other existing logging ' + 'options. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=None, + metavar='FORMAT', + help='DEPRECATED. ' + 'A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'This option is deprecated. Please use ' + 'logging_context_format_string and ' + 'logging_default_format_string instead.'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If no default is set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The base directory used for relative ' + '--log-file paths'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging. 
' + 'Existing syslog format is DEPRECATED during I, ' + 'and then will be changed in J to honor RFC5424'), + cfg.BoolOpt('use-syslog-rfc-format', + # TODO(bogdando) remove or use True after existing + # syslog format deprecation in J + default=False, + help='(Optional) Use syslog rfc5424 format for logging. ' + 'If enabled, will add APP-NAME (RFC5424) before the ' + 'MSG part of the syslog message. The old format ' + 'without APP-NAME is deprecated in I, ' + 'and will be removed in J.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='Syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error') +] + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [%(request_id)s %(user_identity)s] ' + '%(instance)s%(message)s', + help='Format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' + '%(name)s [-] %(instance)s%(message)s', + help='Format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='%(funcName)s %(pathname)s:%(lineno)d', + help='Data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' + '%(instance)s', + help='Prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqp=WARN', + 'amqplib=WARN', + 'boto=WARN', + 'qpid=WARN', + 'sqlalchemy=WARN', + 'suds=INFO', + 'iso8601=WARN', + 'requests.packages.urllib3.connectionpool=WARN' + ], + help='List of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='Publish error events'), + cfg.BoolOpt('fatal_deprecations', + default=False, + help='Make deprecations fatal'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), +] + +CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) +CONF.register_opts(generic_log_opts) +CONF.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. 
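+# Numerically AUDIT lands at 21, i.e. between INFO (20) and WARNING (30).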
+logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = CONF.log_file + logdir = CONF.log_dir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + return None + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
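+    # The substring scan is a cheap fast path: the regex substitutions
+    # below only run when one of the sanitize keys actually appears.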
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + secret = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS: + message = re.sub(pattern, secret, message) + return message + + +class BaseLoggerAdapter(logging.LoggerAdapter): + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + +class LazyAdapter(BaseLoggerAdapter): + def __init__(self, name='unknown', version='unknown'): + self._logger = None + self.extra = {} + self.name = name + self.version = version + + @property + def logger(self): + if not self._logger: + self._logger = getLogger(self.name, self.version) + return self._logger + + +class ContextAdapter(BaseLoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger, project_name, version_string): + self.logger = logger + self.project = project_name + self.version = version_string + + @property + def handlers(self): + return self.logger.handlers + + def deprecated(self, msg, *args, **kwargs): + stdmsg = _("Deprecated: %s") % msg + if CONF.fatal_deprecations: + self.critical(stdmsg, *args, **kwargs) + raise DeprecatedConfig(msg=stdmsg) + else: + self.warn(stdmsg, *args, **kwargs) + + def process(self, msg, kwargs): + # NOTE(mrodden): catch any Message/other object and + # coerce to unicode before they can get + # to the python logging and possibly + # cause string encoding trouble + if not isinstance(msg, six.string_types): + msg = six.text_type(msg) + + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_uuid = (extra.get('instance_uuid', None) or + kwargs.pop('instance_uuid', None)) + instance_extra = '' + if instance: + instance_extra = CONF.instance_format % instance + elif instance_uuid: + instance_extra = (CONF.instance_uuid_format + % {'uuid': instance_uuid}) + extra['instance'] = instance_extra + + extra.setdefault('user_identity', kwargs.pop('user_identity', None)) + + extra['project'] = self.project + extra['version'] = self.version + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. 
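+        # Only datefmt is kept; format() below serializes each LogRecord
+        # field into a JSON document rather than interpolating fmt.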
+ self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [moves.filter( + lambda x: x, + line.rstrip().splitlines()) for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return jsonutils.dumps(message) + + +def _create_logging_excepthook(product_name): + def logging_excepthook(exc_type, value, tb): + extra = {} + if CONF.verbose or CONF.debug: + extra['exc_info'] = (exc_type, value, tb) + getLogger(product_name).critical( + "".join(traceback.format_exception_only(exc_type, value)), + **extra) + return logging_excepthook + + +class LogConfigError(Exception): + + message = _('Error loading logging config %(log_config)s: %(err_msg)s') + + def __init__(self, log_config, err_msg): + self.log_config = log_config + self.err_msg = err_msg + + def __str__(self): + return self.message % dict(log_config=self.log_config, + err_msg=self.err_msg) + + +def _load_log_config(log_config_append): + try: + logging.config.fileConfig(log_config_append, + disable_existing_loggers=False) + except moves.configparser.Error as exc: + raise LogConfigError(log_config_append, str(exc)) + + +def setup(product_name): + """Setup logging.""" + if CONF.log_config_append: + _load_log_config(CONF.log_config_append) + else: + _setup_logging_from_conf() + sys.excepthook = _create_logging_excepthook(product_name) + + +def set_defaults(logging_context_format_string): + cfg.set_defaults(log_opts, + logging_context_format_string= + logging_context_format_string) + + +def _find_facility_from_conf(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + CONF.syslog_log_facility, + None) + + if facility is None and CONF.syslog_log_facility in facility_names: + facility = facility_names.get(CONF.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +class RFCSysLogHandler(logging.handlers.SysLogHandler): + def __init__(self, *args, **kwargs): + self.binary_name = _get_binary_name() + super(RFCSysLogHandler, self).__init__(*args, **kwargs) + + def format(self, record): + msg = super(RFCSysLogHandler, self).format(record) + msg = self.binary_name + ' ' + msg + return 
msg + + +def _setup_logging_from_conf(): + log_root = getLogger(None).logger + for handler in log_root.handlers: + log_root.removeHandler(handler) + + if CONF.use_syslog: + facility = _find_facility_from_conf() + # TODO(bogdando) use the format provided by RFCSysLogHandler + # after existing syslog format deprecation in J + if CONF.use_syslog_rfc_format: + syslog = RFCSysLogHandler(address='/dev/log', + facility=facility) + else: + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + log_root.addHandler(syslog) + + logpath = _get_log_file_path() + if logpath: + filelog = logging.handlers.WatchedFileHandler(logpath) + log_root.addHandler(filelog) + + if CONF.use_stderr: + streamlog = ColorHandler() + log_root.addHandler(streamlog) + + elif not logpath: + # pass sys.stdout as a positional argument + # python2.6 calls the argument strm, in 2.7 it's stream + streamlog = logging.StreamHandler(sys.stdout) + log_root.addHandler(streamlog) + + if CONF.publish_errors: + handler = importutils.import_object( + "gceapi.openstack.common.log_handler.PublishErrorsHandler", + logging.ERROR) + log_root.addHandler(handler) + + datefmt = CONF.log_date_format + for handler in log_root.handlers: + # NOTE(alaski): CONF.log_format overrides everything currently. This + # should be deprecated in favor of context aware formatting. + if CONF.log_format: + handler.setFormatter(logging.Formatter(fmt=CONF.log_format, + datefmt=datefmt)) + log_root.info('Deprecated: log_format is now deprecated and will ' + 'be removed in the next release') + else: + handler.setFormatter(ContextFormatter(datefmt=datefmt)) + + if CONF.debug: + log_root.setLevel(logging.DEBUG) + elif CONF.verbose: + log_root.setLevel(logging.INFO) + else: + log_root.setLevel(logging.WARNING) + + for pair in CONF.default_log_levels: + mod, _sep, level_name = pair.partition('=') + level = logging.getLevelName(level_name) + logger = logging.getLogger(mod) + logger.setLevel(level) + +_loggers = {} + + +def getLogger(name='unknown', version='unknown'): + if name not in _loggers: + _loggers[name] = ContextAdapter(logging.getLogger(name), + name, + version) + return _loggers[name] + + +def getLazyLogger(name='unknown', version='unknown'): + """Returns lazy logger. + + Creates a pass-through logger that does not create the real logger + until it is really needed and delegates all calls to the real logger + once it is created. + """ + return LazyAdapter(name, version) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg.rstrip()) + + +class ContextFormatter(logging.Formatter): + """A context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. 
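+    (With the default suffix this appends the function name, path and
+    line number to every DEBUG record.)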
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + # NOTE(sdague): default the fancier formatting params + # to an empty string so we don't throw an exception if + # they get used + for key in ('instance', 'color'): + if key not in record.__dict__: + record.__dict__[key] = '' + + if record.__dict__.get('request_id', None): + self._fmt = CONF.logging_context_format_string + else: + self._fmt = CONF.logging_default_format_string + + if (record.levelno == logging.DEBUG and + CONF.logging_debug_format_suffix): + self._fmt += " " + CONF.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with CONF.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = moves.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if CONF.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = CONF.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class ColorHandler(logging.StreamHandler): + LEVEL_COLORS = { + logging.DEBUG: '\033[00;32m', # GREEN + logging.INFO: '\033[00;36m', # CYAN + logging.AUDIT: '\033[01;36m', # BOLD CYAN + logging.WARN: '\033[01;33m', # BOLD YELLOW + logging.ERROR: '\033[01;31m', # BOLD RED + logging.CRITICAL: '\033[01;31m', # BOLD RED + } + + def format(self, record): + record.color = self.LEVEL_COLORS[record.levelno] + return logging.StreamHandler.format(self, record) + + +class DeprecatedConfig(Exception): + message = _("Fatal call to deprecated config: %(msg)s") + + def __init__(self, msg): + super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/gceapi/openstack/common/py3kcompat/__init__.py b/gceapi/openstack/common/py3kcompat/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gceapi/openstack/common/py3kcompat/urlutils.py b/gceapi/openstack/common/py3kcompat/urlutils.py new file mode 100644 index 0000000..84e457a --- /dev/null +++ b/gceapi/openstack/common/py3kcompat/urlutils.py @@ -0,0 +1,67 @@ +# +# Copyright 2013 Canonical Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
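
As a quick orientation before the next file: a minimal sketch of how a
service is expected to drive the logging module above (illustrative only;
it assumes oslo.config and the gceapi tree are importable):

    import sys

    from oslo.config import cfg

    from gceapi.openstack.common import log as logging

    CONF = cfg.CONF
    LOG = logging.getLogger(__name__)

    CONF(sys.argv[1:], project='gceapi')  # picks up --debug/--verbose etc.
    logging.setup('gceapi')               # installs handlers from CONF
    LOG.audit('synthesized AUDIT level, logged like any other')
    LOG.info(logging.mask_password("'adminPass' : 'secret'"))  # -> '***'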
+# + +""" +Python2/Python3 compatibility layer for OpenStack +""" + +import six + +if six.PY3: + # python3 + import urllib.error + import urllib.parse + import urllib.request + + urlencode = urllib.parse.urlencode + urljoin = urllib.parse.urljoin + quote = urllib.parse.quote + quote_plus = urllib.parse.quote_plus + parse_qsl = urllib.parse.parse_qsl + unquote = urllib.parse.unquote + unquote_plus = urllib.parse.unquote_plus + urlparse = urllib.parse.urlparse + urlsplit = urllib.parse.urlsplit + urlunsplit = urllib.parse.urlunsplit + SplitResult = urllib.parse.SplitResult + + urlopen = urllib.request.urlopen + URLError = urllib.error.URLError + pathname2url = urllib.request.pathname2url +else: + # python2 + import urllib + import urllib2 + import urlparse + + urlencode = urllib.urlencode + quote = urllib.quote + quote_plus = urllib.quote_plus + unquote = urllib.unquote + unquote_plus = urllib.unquote_plus + + parse = urlparse + parse_qsl = parse.parse_qsl + urljoin = parse.urljoin + urlparse = parse.urlparse + urlsplit = parse.urlsplit + urlunsplit = parse.urlunsplit + SplitResult = parse.SplitResult + + urlopen = urllib2.urlopen + URLError = urllib2.URLError + pathname2url = urllib.pathname2url diff --git a/gceapi/openstack/common/test.py b/gceapi/openstack/common/test.py new file mode 100644 index 0000000..c406f5d --- /dev/null +++ b/gceapi/openstack/common/test.py @@ -0,0 +1,88 @@ +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Common utilities used in testing""" + +import logging +import os +import tempfile + +import fixtures +import testtools + +_TRUE_VALUES = ('True', 'true', '1', 'yes') +_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s" + + +class BaseTestCase(testtools.TestCase): + + def setUp(self): + super(BaseTestCase, self).setUp() + self._set_timeout() + self._fake_output() + self._fake_logs() + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + self.tempdirs = [] + + def _set_timeout(self): + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) + try: + test_timeout = int(test_timeout) + except ValueError: + # If timeout value is invalid do not set a timeout. 
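+            # (e.g. OS_TEST_TIMEOUT='none' simply disables the fixture)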
+ test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + + def _fake_output(self): + if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + def _fake_logs(self): + if os.environ.get('OS_DEBUG') in _TRUE_VALUES: + level = logging.DEBUG + else: + level = logging.INFO + capture_logs = os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES + if capture_logs: + self.useFixture( + fixtures.FakeLogger( + format=_LOG_FORMAT, + level=level, + nuke_handlers=capture_logs, + ) + ) + else: + logging.basicConfig(format=_LOG_FORMAT, level=level) + + def create_tempfiles(self, files, ext='.conf'): + tempfiles = [] + for (basename, contents) in files: + if not os.path.isabs(basename): + (fd, path) = tempfile.mkstemp(prefix=basename, suffix=ext) + else: + path = basename + ext + fd = os.open(path, os.O_CREAT | os.O_WRONLY) + tempfiles.append(path) + try: + os.write(fd, contents) + finally: + os.close(fd) + return tempfiles diff --git a/gceapi/openstack/common/timeutils.py b/gceapi/openstack/common/timeutils.py new file mode 100644 index 0000000..52688a0 --- /dev/null +++ b/gceapi/openstack/common/timeutils.py @@ -0,0 +1,210 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime +import time + +import iso8601 +import six + + +# ISO 8601 extended time format with microseconds +_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' +_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' +PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND + + +def isotime(at=None, subsecond=False): + """Stringify time in ISO 8601 format.""" + if not at: + at = utcnow() + st = at.strftime(_ISO8601_TIME_FORMAT + if not subsecond + else _ISO8601_TIME_FORMAT_SUBSECOND) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + st += ('Z' if tz == 'UTC' else tz) + return st + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format.""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(six.text_type(e)) + except TypeError as e: + raise ValueError(six.text_type(e)) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC naive object.""" + offset = timestamp.utcoffset() + if offset is None: + return timestamp + return timestamp.replace(tzinfo=None) - offset + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + if isinstance(before, six.string_types): + before = parse_strtime(before).replace(tzinfo=None) + else: + before = before.replace(tzinfo=None) + + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def is_newer_than(after, seconds): + """Return True if after is newer than seconds.""" + if isinstance(after, six.string_types): + after = parse_strtime(after).replace(tzinfo=None) + else: + after = after.replace(tzinfo=None) + + return after - utcnow() > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + if utcnow.override_time is None: + # NOTE(kgriffs): This is several times faster + # than going through calendar.timegm(...) + return int(time.time()) + + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + try: + return utcnow.override_time.pop(0) + except AttributeError: + return utcnow.override_time + return datetime.datetime.utcnow() + + +def iso8601_from_timestamp(timestamp): + """Returns a iso8601 formatted date from timestamp.""" + return isotime(datetime.datetime.utcfromtimestamp(timestamp)) + + +utcnow.override_time = None + + +def set_time_override(override_time=None): + """Overrides utils.utcnow. + + Make it return a constant time or a list thereof, one at a time. + + :param override_time: datetime instance or list thereof. If not + given, defaults to the current UTC time. 
+ """ + utcnow.override_time = override_time or datetime.datetime.utcnow() + + +def advance_time_delta(timedelta): + """Advance overridden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + try: + for dt in utcnow.override_time: + dt += timedelta + except TypeError: + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overridden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def marshall_now(now=None): + """Make an rpc-safe datetime with microseconds. + + Note: tzinfo is stripped, but not required for relative times. + """ + if not now: + now = utcnow() + return dict(day=now.day, month=now.month, year=now.year, hour=now.hour, + minute=now.minute, second=now.second, + microsecond=now.microsecond) + + +def unmarshall_time(tyme): + """Unmarshall a datetime dict.""" + return datetime.datetime(day=tyme['day'], + month=tyme['month'], + year=tyme['year'], + hour=tyme['hour'], + minute=tyme['minute'], + second=tyme['second'], + microsecond=tyme['microsecond']) + + +def delta_seconds(before, after): + """Return the difference between two timing objects. + + Compute the difference in seconds between two date, time, or + datetime objects (as a float, to microsecond resolution). + """ + delta = after - before + return total_seconds(delta) + + +def total_seconds(delta): + """Return the total seconds of datetime.timedelta object. + + Compute total seconds of datetime.timedelta, datetime.timedelta + doesn't have method total_seconds in Python2.6, calculate it manually. + """ + try: + return delta.total_seconds() + except AttributeError: + return ((delta.days * 24 * 3600) + delta.seconds + + float(delta.microseconds) / (10 ** 6)) + + +def is_soon(dt, window): + """Determines if time is going to happen in the next window seconds. + + :param dt: the time + :param window: minimum seconds to remain to consider the time not soon + + :return: True if expiration is within the given duration + """ + soon = (utcnow() + datetime.timedelta(seconds=window)) + return normalize_time(dt) <= soon diff --git a/gceapi/paths.py b/gceapi/paths.py new file mode 100644 index 0000000..7069f95 --- /dev/null +++ b/gceapi/paths.py @@ -0,0 +1,68 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +from oslo.config import cfg + +path_opts = [ + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the gceapi python module is installed'), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help='Directory where gceapi binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining gceapi's state"), +] + +CONF = cfg.CONF +CONF.register_opts(path_opts) + + +def basedir_def(*args): + """Return an uninterpolated path relative to $pybasedir.""" + return os.path.join('$pybasedir', *args) + + +def bindir_def(*args): + """Return an uninterpolated path relative to $bindir.""" + return os.path.join('$bindir', *args) + + +def state_path_def(*args): + """Return an uninterpolated path relative to $state_path.""" + return os.path.join('$state_path', *args) + + +def basedir_rel(*args): + """Return a path relative to $pybasedir.""" + return os.path.join(CONF.pybasedir, *args) + + +def bindir_rel(*args): + """Return a path relative to $bindir.""" + return os.path.join(CONF.bindir, *args) + + +def state_path_rel(*args): + """Return a path relative to $state_path.""" + return os.path.join(CONF.state_path, *args) diff --git a/gceapi/service.py b/gceapi/service.py new file mode 100644 index 0000000..2bd8426 --- /dev/null +++ b/gceapi/service.py @@ -0,0 +1,263 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import signal +import sys + +import eventlet +import greenlet +from oslo.config import cfg + +from gceapi.openstack.common import eventlet_backdoor +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import importutils +from gceapi.openstack.common import log as logging +from gceapi import wsgi + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.BoolOpt('use_ssl', + default=False, + help='Enable ssl connections or not'), + cfg.IntOpt('service_down_time', + default=60, + help='maximum time since last check-in for up service'), + cfg.StrOpt('gce_listen', + default="0.0.0.0", + help='IP address for gce api to listen'), + cfg.IntOpt('gce_listen_port', + default=8777, + help='port for gce api to listen'), + cfg.StrOpt('network_api', + default="neutron", + help='Name of network API. neutron(quantum) or nova'), + ] + +CONF = cfg.CONF +CONF.register_opts(service_opts) + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. 
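+
+        Launched servers run as eventlet greenthreads tracked in
+        self._services; an eventlet backdoor port is attached if enabled.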
+ + :returns: None + + """ + self._services = [] + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + @staticmethod + def run_server(server): + """Start and wait for a server to finish. + + :param service: Server to run and wait for. + :returns: None + + """ + server.start() + server.wait() + + def launch_server(self, server): + """Load and start the given server. + + :param server: The server you would like to start. + :returns: None + + """ + if self.backdoor_port is not None: + server.backdoor_port = self.backdoor_port + gt = eventlet.spawn(self.run_server, server) + self._services.append(gt) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + for service in self._services: + service.kill() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + for service in self._services: + try: + service.wait() + except greenlet.GreenletExit: + pass + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_DFL) + + raise SignalExit(signo) + + def wait(self): + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + LOG.debug(_('Full set of CONF:')) + for flag in CONF: + flag_get = CONF.get(flag, None) + # hide flag contents from log if contains a password + # should use secret flag when switch over to openstack-common + if ("_password" in flag or "_key" in flag or + (flag == "sql_connection" and "mysql:" in flag_get)): + LOG.debug(_('%(flag)s : FLAG SET ') % {'flag': flag}) + else: + LOG.debug('%(flag)s : %(flag_get)s' % {'flag': flag, + 'flag_get': flag_get}) + + status = None + try: + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'}[exc.signo] + LOG.info(_('Caught %s, exiting'), signame) + status = exc.code + except SystemExit as exc: + status = exc.code + finally: + self.stop() + + if status is not None: + sys.exit(status) + + +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" + + def __init__(self, name, loader=None, use_ssl=False, max_url_len=None): + """Initialize, but do not start the WSGI server. + + :param name: The name of the WSGI server given to the loader. + :param loader: Loads the WSGI application using the given name. + :returns: None + + """ + self.name = name + self.manager = self._get_manager() + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") + self.port = getattr(CONF, '%s_listen_port' % name, 0) + self.use_ssl = use_ssl + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port, + use_ssl=self.use_ssl, + max_url_len=max_url_len) + # Pull back actual port used + self.port = self.server.port + self.backdoor_port = None + + def _get_manager(self): + """Initialize a Manager object appropriate for this service. + + Use the service name to look up a Manager subclass from the + configuration and initialize an instance. If no class name + is configured, just return None. + + :returns: a Manager instance, or None. 
+ + """ + fl = '%s_manager' % self.name + if fl not in CONF: + return None + + manager_class_name = CONF.get(fl, None) + if not manager_class_name: + return None + + manager_class = importutils.import_class(manager_class_name) + return manager_class() + + def start(self): + """Start serving this service using loaded configuration. + + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. + + :returns: None + + """ + if self.manager: + self.manager.init_host() + self.manager.pre_start_hook() + if self.backdoor_port is not None: + self.manager.backdoor_port = self.backdoor_port + self.server.start() + if self.manager: + self.manager.post_start_hook() + + def stop(self): + """Stop serving this API. + + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + + """ + self.server.wait() + + +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + +def serve(server): + global _launcher + if _launcher: + raise RuntimeError(_('serve() can only be called once')) + + _launcher = ServiceLauncher() + _launcher.launch_server(server) + + +def wait(): + _launcher.wait() diff --git a/gceapi/test.py b/gceapi/test.py new file mode 100644 index 0000000..0394e9c --- /dev/null +++ b/gceapi/test.py @@ -0,0 +1,180 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base classes for our unit tests. + +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. + +""" + +import os +import shutil + +import collections +import eventlet +import fixtures +import mox +from oslo.config import cfg +import stubout +import testtools + +from gceapi.openstack.common import log as logging +from gceapi import paths + + +test_opts = [ + cfg.StrOpt('sqlite_clean_db', + default='clean.sqlite', + help='File name of clean sqlite db'), + cfg.StrOpt('network_api', + default="neutron", + help='Name of network API. 
neutron(quantum) or nova'), + ] + +CONF = cfg.CONF +CONF.register_opts(test_opts) +CONF.import_opt('connection', + 'gceapi.openstack.common.db.sqlalchemy.session', + group='database') +CONF.set_override('use_stderr', False) + +logging.setup('gceapi') +LOG = logging.getLogger(__name__) + +eventlet.monkey_patch(os=False) + +_DB_CACHE = None + + +class Database(fixtures.Fixture): + + def __init__(self, db_session, db_migrate, sql_connection, + sqlite_db, sqlite_clean_db): + self.sql_connection = sql_connection + self.sqlite_db = sqlite_db + self.sqlite_clean_db = sqlite_clean_db + + self.engine = db_session.get_engine() + self.engine.dispose() + conn = self.engine.connect() + if sql_connection == "sqlite://": + if db_migrate.db_version() > db_migrate.INIT_VERSION: + return + else: + testdb = paths.state_path_rel(sqlite_db) + if os.path.exists(testdb): + return + db_migrate.db_sync() + self.post_migrations() + if sql_connection == "sqlite://": + conn = self.engine.connect() + self._DB = "".join(line for line in conn.connection.iterdump()) + self.engine.dispose() + else: + cleandb = paths.state_path_rel(sqlite_clean_db) + shutil.copyfile(testdb, cleandb) + + def setUp(self): + super(Database, self).setUp() + + if self.sql_connection == "sqlite://": + conn = self.engine.connect() + conn.connection.executescript(self._DB) + self.addCleanup(self.engine.dispose) + else: + shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db), + paths.state_path_rel(self.sqlite_db)) + + +class MoxStubout(fixtures.Fixture): + """Deal with code around mox and stubout as a fixture.""" + + def setUp(self): + super(MoxStubout, self).setUp() + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.addCleanup(self.mox.UnsetStubs) + self.addCleanup(self.stubs.UnsetAll) + self.addCleanup(self.stubs.SmartUnsetAll) + self.addCleanup(self.mox.VerifyAll) + + +class TestCase(testtools.TestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + """Run before each test method to initialize test environment.""" + super(TestCase, self).setUp() + test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) + try: + test_timeout = int(test_timeout) + except ValueError: + # If timeout value is invalid do not set a timeout. 
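+            # (mirrors BaseTestCase._set_timeout in openstack/common/test.py)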
+ test_timeout = 0 + if test_timeout > 0: + self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) + self.useFixture(fixtures.NestedTempfile()) + self.useFixture(fixtures.TempHomeDir()) + + if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or + os.environ.get('OS_STDOUT_CAPTURE') == '1'): + stdout = self.useFixture(fixtures.StringStream('stdout')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) + if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or + os.environ.get('OS_STDERR_CAPTURE') == '1'): + stderr = self.useFixture(fixtures.StringStream('stderr')).stream + self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) + + self.log_fixture = self.useFixture(fixtures.FakeLogger('gceapi')) + + mox_fixture = self.useFixture(MoxStubout()) + self.mox = mox_fixture.mox + self.stubs = mox_fixture.stubs + self.addCleanup(self._clear_attrs) + self.useFixture(fixtures.EnvironmentVariable('http_proxy')) + CONF.set_override('fatal_exception_format_errors', True) + + def _clear_attrs(self): + # Delete attributes that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + + def flags(self, **kw): + """Override flag variables for a test.""" + group = kw.pop('group', None) + for k, v in kw.iteritems(): + CONF.set_override(k, v, group) + + def assertDictEqual(self, d1, d2, msg=None): + for k, v1 in d1.iteritems(): + self.assertIn(k, d2) + v2 = d2[k] + if(isinstance(v1, collections.Iterable) and + not isinstance(v1, basestring)): + self.assertItemsEqual(v1, v2, msg) + else: + self.assertEqual(v1, v2, msg) + return True + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + self.assertEqual(sorted(expected_seq), sorted(actual_seq), msg) diff --git a/gceapi/tests/__init__.py b/gceapi/tests/__init__.py new file mode 100644 index 0000000..9111b80 --- /dev/null +++ b/gceapi/tests/__init__.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`gceapi.tests` -- Gceapi Unittests +===================================================== + +.. automodule:: gceapi.tests + :platform: Unix +""" + +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks +import __builtin__ +setattr(__builtin__, '_', lambda x: x) diff --git a/gceapi/tests/api/__init__.py b/gceapi/tests/api/__init__.py new file mode 100644 index 0000000..d65c689 --- /dev/null +++ b/gceapi/tests/api/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/gceapi/tests/api/common.py b/gceapi/tests/api/common.py new file mode 100644 index 0000000..205b54a --- /dev/null +++ b/gceapi/tests/api/common.py @@ -0,0 +1,136 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import uuid + +from cinderclient import client as cinderclient +from glanceclient import client as glanceclient +from keystoneclient.v2_0 import client as kc +from neutronclient.v2_0 import client as neutronclient +from novaclient import client as novaclient +from novaclient import shell as novashell + +import gceapi.api +from gceapi.openstack.common import timeutils +from gceapi import test +from gceapi.tests.api import fake_cinder_client +from gceapi.tests.api import fake_db +from gceapi.tests.api import fake_glance_client +from gceapi.tests.api import fake_keystone_client +from gceapi.tests.api import fake_neutron_client +from gceapi.tests.api import fake_nova_client +from gceapi.tests.api import fake_request + + +COMMON_OPERATION = { + u'kind': u'compute#operation', + u'id': u'2898918100885047175', + u'name': u'operation-735d48a5-284e-4fb4-a10c-a465ac0b8888', + u'selfLink': u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/operations/' + 'operation-735d48a5-284e-4fb4-a10c-a465ac0b8888', + u'user': u'fake_user', + u'insertTime': u'2013-12-27T08:46:34.684354Z', + u'startTime': u'2013-12-27T08:46:34.684354Z', +} + +COMMON_FINISHED_OPERATION = { + u'progress': 100, + u'status': u'DONE', + u'endTime': u'2013-12-27T08:46:34.684354Z', +} +COMMON_FINISHED_OPERATION.update(COMMON_OPERATION) + +COMMON_PENDING_OPERATION = { + u'progress': 0, + u'status': u'RUNNING', +} +COMMON_PENDING_OPERATION.update(COMMON_OPERATION) + +REGION_OPERATION_SPECIFIC = { + u'id': u'1085621292163955072', + u'selfLink': u'http://localhost/compute/v1beta15/projects/' + 'fake_project/regions/nova/operations/' + 'operation-735d48a5-284e-4fb4-a10c-a465ac0b8888', + u'region': u'http://localhost/compute/v1beta15/projects/' + 'fake_project/regions/nova', +} + +COMMON_REGION_FINISHED_OPERATION = copy.copy(COMMON_FINISHED_OPERATION) +COMMON_REGION_FINISHED_OPERATION.update(REGION_OPERATION_SPECIFIC) + +ZONE_OPERATION_SPECIFIC = { + u'id': u'1422079331329525920', + u'selfLink': u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-735d48a5-284e-4fb4-a10c-a465ac0b8888', + u'zone': u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova', +} + +COMMON_ZONE_PENDING_OPERATION = copy.copy(COMMON_PENDING_OPERATION) 
+COMMON_ZONE_PENDING_OPERATION.update(ZONE_OPERATION_SPECIFIC)
+COMMON_ZONE_FINISHED_OPERATION = copy.copy(COMMON_FINISHED_OPERATION)
+COMMON_ZONE_FINISHED_OPERATION.update(ZONE_OPERATION_SPECIFIC)
+
+
+class GCEControllerTest(test.TestCase):
+
+    _APIRouter = None
+
+    def setUp(self):
+        super(GCEControllerTest, self).setUp()
+        self.maxDiff = None
+
+        self.stubs.Set(kc, 'Client', fake_keystone_client.FakeKeystoneClient)
+        self.stubs.Set(neutronclient, "Client",
+                       fake_neutron_client.FakeNeutronClient)
+        self.stubs.Set(glanceclient, "Client",
+                       fake_glance_client.FakeGlanceClient)
+        self.stubs.Set(cinderclient, "Client",
+                       fake_cinder_client.FakeCinderClient)
+        self.stubs.Set(novashell.OpenStackComputeShell,
+                       '_discover_extensions',
+                       fake_nova_client.fake_discover_extensions)
+        self.stubs.Set(novaclient, 'Client', fake_nova_client.FakeNovaClient)
+        self.db_fixture = self.useFixture(fake_db.DBFixture(self.stubs))
+        self.stubs.Set(
+            uuid, "uuid4",
+            lambda: uuid.UUID("735d48a5-284e-4fb4-a10c-a465ac0b8888"))
+        # NOTE(ft): we cannot stub datetime.utcnow,
+        # so we stub conversion from datetime to string
+        self.stubs.Set(timeutils, "isotime",
+                       lambda x, y: "2013-12-27T08:46:34.684354Z")
+
+    def request_gce(self, url, method="GET", body=None):
+        fake_req = fake_request.HTTPRequest.blank(url, method=method,
+                                                  has_body=body is not None)
+        fake_req.json = body
+        return fake_req.get_response(self._get_api_router())
+
+    def assertDictInListBySelfLink(self, expected, container, msg=None):
+        for member in container:
+            if expected["selfLink"] != member["selfLink"]:
+                continue
+            self.assertDictEqual(expected, member)
+            return
+        standardMsg = ('Dictionary id %s not found in dictionary list %s'
+                       % (expected["selfLink"], map(lambda x: x["selfLink"],
+                          container)))
+        self.fail(self._formatMessage(msg, standardMsg))
+
+    def _get_api_router(self):
+        if not self._APIRouter:
+            self._APIRouter = gceapi.api.APIRouter()
+        return self._APIRouter
diff --git a/gceapi/tests/api/fake_cinder_client.py b/gceapi/tests/api/fake_cinder_client.py
new file mode 100644
index 0000000..2393fd5
--- /dev/null
+++ b/gceapi/tests/api/fake_cinder_client.py
@@ -0,0 +1,307 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
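+
+# In-memory replacement for cinderclient, installed by
+# GCEControllerTest.setUp above, which stubs cinderclient.client.Client
+# with FakeCinderClient. Volume and snapshot calls in the tests are then
+# served from the FAKE_* lists below. A minimal usage sketch:
+#
+#     volumes = FakeCinderClient("1").volumes
+#     disk = volumes.get("e922ebbb-2938-4a12-869f-cbc4e26c6600")
+#     assert disk.display_name == "fake-disk-1"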
+ +import copy + +from cinderclient import exceptions as exc + +from gceapi.tests.api import fake_request +from gceapi.tests.api import utils + + +FAKE_DISKS = [utils.FakeObject({ + "status": "available", + "volume_type": None, + "display_name": "fake-disk-1", + "availability_zone": "nova", + "created_at": "2013-08-14T12:35:22.000000", + "display_description": "fake disk from snapshot", + "metadata": {}, + "snapshot_id": "991cda9c-28bd-420f-8432-f5159def85d6", + "id": "e922ebbb-2938-4a12-869f-cbc4e26c6600", + "size": 2, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], +}), utils.FakeObject({ + "status": "available", + "volume_type": None, + "bootable": u"true", + "volume_image_metadata": { + "image_id": "60ff30c2-64b6-4a97-9c17-322eebc8bd60", + "image_name": "fake-image-1" + }, + "display_name": "fake-disk-2", + "availability_zone": "nova", + "created_at": "2013-08-14T12:19:35.000000", + "display_description": "", + "metadata": {}, + "snapshot_id": None, + "id": "64ebe1d9-757f-4074-88d0-2ac790be909d", + "size": 1, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], +}), utils.FakeObject({ + "status": "available", + "volume_type": None, + "display_name": "fake-disk-3", + "availability_zone": "nova", + "created_at": "2013-08-14T11:57:44.000000", + "display_description": "full description of disk", + "metadata": {}, + "snapshot_id": None, + "id": "fc0d5c01-dc3b-450d-aaed-de028bb832b1", + "size": 3, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], +}), utils.FakeObject({ + "status": "available", + "volume_type": None, + "display_name": "disk-to-delete", + "availability_zone": "nova", + "created_at": "2013-08-14T12:10:02.000000", + "display_description": "full description of disk", + "metadata": {}, + "snapshot_id": None, + "id": "a0786ec1-d838-4ad6-a497-87ec0b79161b", + "size": 3, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], +}), utils.FakeObject({ + "status": "in-use", + "instance_uuid": "6472359b-d46b-4629-83a9-d2ec8d99468c", + "bootable": u"true", + "volume_image_metadata": { + "image_id": "60ff30c2-64b6-4a97-9c17-322eebc8bd60", + "image_name": "fake-image-1"}, + "display_name": "i1", + "availability_zone": "nova", + "created_at": "2013-08-14T18:55:57.000000", + "display_description": "Persistent boot disk created from " + "http://127.0.0.1:8777/compute/v1beta15/projects/admin" + "/global/images/fake-image-1.", + "volume_type": "None", + "metadata": {}, + "snapshot_id": None, + "id": "ab8829ad-eec1-44a2-8068-d7f00c53ee90", + "size": 1, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [{ + "device": "vdc", + "server_id": "6472359b-d46b-4629-83a9-d2ec8d99468c", + "volume_id": "ab8829ad-eec1-44a2-8068-d7f00c53ee90", + "host_name": None, + "id": "7f862e44-5f41-4a1f-b2f8-dbd2f6bef86f" + }], +})] + +FAKE_SNAPSHOTS = [utils.FakeObject({ + "status": 
"available", + "display_name": "fake-snapshot", + "created_at": "2013-08-14T12:32:28.000000", + "display_description": "full description of snapshot 1", + "volume_size": 2, + "volume_id": "fc0d5c01-dc3b-450d-aaed-de028bb832b1", + "progress": "100%", + "project_id": "f0dcd67240544bc6903766a025c6e2b9", + "id": "991cda9c-28bd-420f-8432-f5159def85d6", + "size": 2, +})] + +FAKE_NEW_DISKS = { + "new-disk": { + "status": "available", + "volume_type": None, + "availability_zone": "nova", + "created_at": "2013-08-14T15:00:22.000000", + "display_description": "", + "metadata": {}, + "snapshot_id": None, + "id": "8af36778-84db-475e-b3c9-da2cc260df4a", + "size": 1, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], + }, + "new-image-disk": { + "status": "available", + "volume_type": None, + "bootable": u"true", + "volume_image_metadata": { + "image_id": "a2459075-d96c-40d5-893e-577ff92e721c", + "image_name": "fake-image-2" + }, + "availability_zone": "nova", + "created_at": "2013-08-14T15:56:00.000000", + "display_description": "disk created with image", + "metadata": {}, + "snapshot_id": None, + "id": "f35151b8-7b81-4e76-b2ab-ecdc14f949d2", + "size": 1, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], + }, + "new-sn-disk": { + "status": "creating", + "volume_type": "None", + "availability_zone": "nova", + "created_at": "2013-08-14T16:43:59.000000", + "display_description": "disk created from snapshot", + "metadata": {}, + "snapshot_id": "991cda9c-28bd-420f-8432-f5159def85d6", + "id": "ae2de9eb-32f2-4db7-8ef0-23f0fd0ebf63", + "size": 1, + "os-vol-tenant-attr:tenant_id": fake_request.PROJECT_ID, + "os-vol-mig-status-attr:name_id": None, + "os-vol-mig-status-attr:migstat": None, + "os-vol-host-attr:host": "grizzly", + "attachments": [], + }, +} + + +FAKE_QUOTAS = utils.FakeObject({ + "gigabytes": { + "limit": 1000, + "reserved": 0, + "in_use": 2 + }, + "snapshots": { + "limit": 10, + "reserved": 0, + "in_use": 1 + }, + "human_id": None, + "volumes": { + "limit": 10, + "reserved": 0, + "in_use": 1 + }, + "HUMAN_ID": False +}) + + +class FakeVolumes(object): + def list(self, detailed=True, search_opts=None): + result = FAKE_DISKS + if search_opts: + if "display_name" in search_opts: + result = [d for d in result + if d.display_name == search_opts["display_name"]] + return result + + def get(self, disk): + disk_id = utils.get_id(disk) + for disk in FAKE_DISKS: + if disk.id == disk_id: + return disk + raise exc.NotFound(exc.NotFound.http_status) + + def delete(self, volume): + global FAKE_DISKS + volume_id = utils.get_id(volume) + FAKE_DISKS = [v for v in FAKE_DISKS if v.id != volume_id] + + def create(self, size, snapshot_id=None, source_volid=None, + display_name=None, display_description=None, + volume_type=None, user_id=None, + project_id=None, availability_zone=None, + metadata=None, imageRef=None): + volume = copy.deepcopy(FAKE_NEW_DISKS[display_name]) + volume["display_name"] = display_name + volume["availability_zone"] = availability_zone + volume["display_description"] = display_description + volume["size"] = size + if project_id: + volume["os-vol-tenant-attr:tenant_id"] = project_id + if snapshot_id is not None: + volume["snapshot_id"] = snapshot_id + if imageRef is not None: + 
volume["volume_image_metadata"] = { + "image_id": imageRef, + "image_name": "fake-image-2" + } + FAKE_DISKS.append(utils.FakeObject(volume)) + return utils.FakeObject(volume) + + +class FakeVolumeSnapshots(object): + def get(self, snapshot): + snapshot_id = utils.get_id(snapshot) + for snapshot in FAKE_SNAPSHOTS: + if snapshot.id == snapshot_id: + return snapshot + raise exc.NotFound(exc.NotFound.http_status) + + def list(self, detailed=True, search_opts=None): + result = FAKE_SNAPSHOTS + if search_opts: + if "display_name" in search_opts: + result = [d for d in result + if d.display_name == search_opts["display_name"]] + return result + + def delete(self, snapshot): + pass + + def create(self, volume_id, force=False, + display_name=None, display_description=None): + return FAKE_SNAPSHOTS[0] + + +class FakeQuotas(object): + def get(self, tenant_id, **kwargs): + if "usage" not in kwargs: + raise exc.BadRequest("There is no arg 'usage' in request") + return FAKE_QUOTAS + + +class FakeCinderClient(object): + def __init__(self, version, *args, **kwargs): + pass + + @property + def client(self): + return self + + @property + def volumes(self): + return FakeVolumes() + + @property + def volume_snapshots(self): + return FakeVolumeSnapshots() + + @property + def quotas(self): + return FakeQuotas() diff --git a/gceapi/tests/api/fake_db.py b/gceapi/tests/api/fake_db.py new file mode 100644 index 0000000..f4dcdb7 --- /dev/null +++ b/gceapi/tests/api/fake_db.py @@ -0,0 +1,316 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import copy
+import fixtures
+
+from gceapi import db
+
+
+ITEMS = [
+    {
+        "kind": "network",
+        "id": "734b9c83-3a8b-4350-8fbf-d40f571ee163",
+        "creationTimestamp": "2013-12-25T09:05:07.396957Z",
+        "description": "main network",
+    },
+    {
+        "kind": "route",
+        "id": ("734b9c83-3a8b-4350-8fbf-d40f571ee163//"
+               "eee5ba4f-c67e-40ec-8595-61b8e2bb715a//"
+               "32.44.64.0/24//"
+               "10.0.0.32//"
+               "custom-route-1"),
+        "creationTimestamp": "2013-12-25T09:05:07.396957Z",
+        "description": "route for 32.44.64.0/24",
+    },
+    {
+        "kind": "route",
+        "id": ("734b9c83-3a8b-4350-8fbf-d40f571ee163//"
+               "22be757a-a426-42fb-8e4b-b4c876f49f62//"
+               "40.81.234.0/24//"
+               "10.0.0.107//"
+               "obsolete-route"),
+        "creationTimestamp": "2013-12-25T09:05:07.396957Z",
+        "description": "route for 40.81.234.0/24",
+    },
+    {
+        "kind": "instance",
+        "id": "d6957005-3ce7-4727-91d2-ae37fe5a199a",
+        "description": "i1 description",
+    },
+    {
+        "kind": "instance",
+        "id": "6472359b-d46b-4629-83a9-d2ec8d99468c",
+        "description": "i2 description",
+    },
+    {
+        "kind": "access_config",
+        "id": "i1-192.168.138.196",
+        "instance_name": "i1",
+        "nic": "private",
+        "name": "ip for i1",
+        "type": "ONE_TO_ONE_NAT",
+        "addr": "192.168.138.196"
+    },
+    {
+        "kind": "attached_disk",
+        "id": "i1-ab8829ad-eec1-44a2-8068-d7f00c53ee90",
+        "instance_name": "i1",
+        "name": "christmas-tree",
+        "volume_id": "ab8829ad-eec1-44a2-8068-d7f00c53ee90"
+    },
+    {
+        "kind": "image",
+        "id": "60ff30c2-64b6-4a97-9c17-322eebc8bd60",
+        "description": "christmas-tree",
+        "image_ref": "http://fake_url/fake_resource"
+    },
+    {
+        "kind": "firewall",
+        "id": "a4ab9c5f-f0b5-4952-8e76-6a8ca0d0a402",
+        "creationTimestamp": "2013-12-25T09:01:00.396957Z",
+        "network_name": "private"
+    },
+    {
+        "kind": "firewall",
+        "id": "b599598d-41b9-4075-a47e-019ba785c243",
+        "creationTimestamp": "2013-12-25T09:02:00.396957Z",
+        "network_name": "private"
+    },
+    {
+        "kind": "firewall",
+        "id": "1aaa637b-87f4-4e27-bc86-ff63d30264b2",
+        "creationTimestamp": "2013-12-25T09:03:00.396957Z",
+        "network_name": "private"
+    },
+    {
+        "kind": "operation",
+        "id": "47be73d8-b8fe-4148-9e3b-3f323136ee57",
+        "insert_time": "2014-01-20T11:17:39.735738Z",
+        "start_time": "2014-01-20T11:17:39.935278Z",
+        "name": "operation-47be73d8-b8fe-4148-9e3b-3f323136ee57",
+        "type": "add",
+        "user": "admin",
+        "status": "RUNNING",
+        "progress": 0,
+        "scope_type": "zone",
+        "scope_name": "nova",
+        "target_type": "instance",
+        "target_name": "i1",
+        "method_key": "instance-add",
+        "item_id": "d6957005-3ce7-4727-91d2-ae37fe5a199a",
+    },
+    {
+        "kind": "operation",
+        "id": "fbd91157-91e9-4121-af74-090260aa38cc",
+        "insert_time": "2014-01-20T11:17:39.735738Z",
+        "start_time": "2014-01-20T11:17:39.935278Z",
+        "name": "operation-fbd91157-91e9-4121-af74-090260aa38cc",
+        "type": "delete",
+        "user": "admin",
+        "status": "RUNNING",
+        "progress": 0,
+        "scope_type": "zone",
+        "scope_name": "nova",
+        "target_type": "instance",
+        "target_name": "i-deleted",
+        "method_key": "instance-delete",
+        "item_id": "a6d176c9-389b-4a68-94f2-92a4cc276124",
+    },
+    {
+        "kind": "operation",
+        "id": "6fc4e7e2-c0c8-4f97-bf1d-f6f958eb17b7",
+        "insert_time": "2014-01-20T11:17:39.735738Z",
+        "start_time": "2014-01-20T11:17:39.935278Z",
+        "name": "operation-6fc4e7e2-c0c8-4f97-bf1d-f6f958eb17b7",
+        "type": "reset",
+        "user": "admin",
+        "status": "RUNNING",
+        "progress": 0,
+        "scope_type": "zone",
+        "scope_name": "nova",
+        "target_type": "instance",
+        "target_name": "i2",
+        "method_key": "instance-reset",
+        "item_id": "6472359b-d46b-4629-83a9-d2ec8d99468c",
+    },
+    {
+ "kind": "operation", + "id": "9417e8bd-e8cc-47a1-86e8-c4c24c043b3d", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-9417e8bd-e8cc-47a1-86e8-c4c24c043b3d", + "type": "add", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "global", + "scope_name": None, + "target_type": "image", + "target_name": "fake-image-1", + "method_key": "image-add", + "item_id": "60ff30c2-64b6-4a97-9c17-322eebc8bd60", + }, + { + "kind": "operation", + "id": "0aad68c4-ee6b-45da-af7e-9e696a885168", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-0aad68c4-ee6b-45da-af7e-9e696a885168", + "type": "delete", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "global", + "scope_name": None, + "target_type": "image", + "target_name": "fake-deleted-image", + "method_key": "image-delete", + "item_id": "10bc8fee-401f-427b-aedc-6d7eb5e19dce", + }, + { + "kind": "operation", + "id": "05e2a2b2-9708-4386-97cc-2318df3357b6", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-05e2a2b2-9708-4386-97cc-2318df3357b6", + "type": "add", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "zone", + "scope_name": "nova", + "target_type": "disk", + "target_name": "fake-disk-1", + "method_key": "disk-add", + "item_id": "e922ebbb-2938-4a12-869f-cbc4e26c6600", + }, + { + "kind": "operation", + "id": "1cfd73fa-9b79-43ef-bbc7-c44bc514ba2e", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-1cfd73fa-9b79-43ef-bbc7-c44bc514ba2e", + "type": "delete", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "zone", + "scope_name": "nova", + "target_type": "disk", + "target_name": "fake-deleted-disk", + "method_key": "disk-delete", + "item_id": "7c97d368-0d8a-4833-9da0-cd58b94660c3", + }, + { + "kind": "operation", + "id": "3f6f1326-3e7c-4076-be6b-939147d031ae", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-3f6f1326-3e7c-4076-be6b-939147d031ae", + "type": "createSnapshot", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "zone", + "scope_name": "nova", + "target_type": "disk", + "target_name": "fake-disk-3", + "method_key": "snapshot-add", + "item_id": "991cda9c-28bd-420f-8432-f5159def85d6", + }, + { + "kind": "operation", + "id": "e72badca-0273-4a69-9303-181df05e602c", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "name": "operation-e72badca-0273-4a69-9303-181df05e602c", + "type": "delete", + "user": "admin", + "status": "RUNNING", + "progress": 0, + "scope_type": "global", + "scope_name": None, + "target_type": "snapshot", + "target_name": "fake-deleted-snapshot", + "method_key": "snapshot-delete", + "item_id": "4a354c43-4750-45cd-8d7f-643afe2946bf", + }, + { + "kind": "operation", + "id": "a7b6bb82-d51f-4f04-a07c-bd9241bc2aac", + "insert_time": "2014-01-20T11:17:39.735738Z", + "start_time": "2014-01-20T11:17:39.935278Z", + "end_time": "2014-01-20T11:17:43.378890Z", + "name": "operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac", + "type": "setMetadata", + "user": "admin", + "status": "DONE", + "progress": 100, + "scope_type": "global", + "scope_name": None, + "target_type": "project", + "target_name": None, + }, +] + + +class DBFixture(fixtures.Fixture): + 
+    def __init__(self, stubs):
+        super(DBFixture, self).__init__()
+        self.stubs = stubs
+        self.items = copy.copy(ITEMS)
+
+    def setUp(self):
+        super(DBFixture, self).setUp()
+        self.stubs.Set(db, "add_item", self.fake_add_item)
+        self.stubs.Set(db, "update_item", self.fake_update_item)
+        self.stubs.Set(db, "delete_item", self.fake_delete_item)
+        self.stubs.Set(db, "get_items", self.fake_get_items)
+        self.stubs.Set(db, "get_item_by_id", self.fake_get_item_by_id)
+        self.stubs.Set(db, "get_item_by_name", self.fake_get_item_by_name)
+
+    def fake_add_item(self, context, kind, data):
+        if any(item["kind"] == kind and item["id"] == data["id"] and
+               (data.get("name") is None or
+                item.get("name") == data.get("name"))
+               for item in self.items):
+            raise Exception("Duplicate entry")
+        item = copy.copy(data)
+        item["kind"] = kind
+        self.items.append(item)
+        return data
+
+    def fake_update_item(self, context, kind, item_data):
+        db_item = next((item for item in self.items
+                        if (item["kind"] == kind and
+                            item["id"] == item_data["id"])))
+        db_item.update(item_data)
+
+    def fake_delete_item(self, context, kind, item_id):
+        # keep every item except the one matching both kind and id
+        self.items = [item for item in self.items
+                      if item["kind"] != kind or item["id"] != item_id]
+
+    def fake_get_items(self, context, kind):
+        return [copy.copy(item) for item in self.items
+                if item["kind"] == kind]
+
+    def fake_get_item_by_id(self, context, kind, item_id):
+        return next((copy.copy(item) for item in self.items
+                     if item["kind"] == kind and item["id"] == item_id), None)
+
+    def fake_get_item_by_name(self, context, kind, name):
+        return next((copy.copy(item) for item in self.items
+                     if item["kind"] == kind and
+                     item.get("name") == name), None)
diff --git a/gceapi/tests/api/fake_glance_client.py b/gceapi/tests/api/fake_glance_client.py
new file mode 100644
index 0000000..23a909f
--- /dev/null
+++ b/gceapi/tests/api/fake_glance_client.py
@@ -0,0 +1,137 @@
+# Copyright 2013 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
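+
+# In-memory replacement for glanceclient. GCEControllerTest.setUp
+# (tests/api/common.py) stubs glanceclient.client.Client with
+# FakeGlanceClient, so image lookups resolve against FAKE_IMAGES below.
+# A minimal usage sketch:
+#
+#     images = FakeGlanceClient("1").images
+#     image = images.get("60ff30c2-64b6-4a97-9c17-322eebc8bd60")
+#     assert image.name == "fake-image-1"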
+ +import copy + +from glanceclient import exc as glance_exc + +from gceapi.openstack.common import timeutils +from gceapi.tests.api import fake_request +from gceapi.tests.api import utils + + +_TIMESTAMP = timeutils.parse_isotime('2013-08-01T11:30:25') +FAKE_IMAGES = [utils.FakeObject({ + 'id': '60ff30c2-64b6-4a97-9c17-322eebc8bd60', + 'name': 'fake-image-1', + 'created_at': _TIMESTAMP, + 'updated_at': _TIMESTAMP, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': False, + 'container_format': 'bare', + 'disk_format': 'raw', + 'properties': {}, + 'owner': fake_request.PROJECT_ID, + 'protected': False, + 'min_ram': 0, + 'checksum': u'50bdc35edb03a38d91b1b071afb20a3c', + 'min_disk': 0, + 'size': 1 +}), utils.FakeObject({ + 'id': 'a2459075-d96c-40d5-893e-577ff92e721c', + 'name': 'fake-image-2', + 'created_at': _TIMESTAMP, + 'updated_at': _TIMESTAMP, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'bare', + 'disk_format': 'raw', + 'properties': {}, + 'owner': fake_request.PROJECT_ID, + 'protected': False, + 'min_ram': 0, + 'checksum': u'20bdc35edb03a38d91b1b071afb20a3c', + 'min_disk': 0, + 'size': 2, + }), utils.FakeObject({ + 'id': '0aa076e2-def4-43d1-ae81-c77a9f9279e6', + 'name': 'image-to-delete', + 'created_at': _TIMESTAMP, + 'updated_at': _TIMESTAMP, + 'deleted_at': None, + 'deleted': False, + 'status': 'active', + 'is_public': True, + 'container_format': 'bare', + 'disk_format': 'raw', + 'properties': {}, + 'owner': fake_request.PROJECT_ID, + 'protected': False, + 'min_ram': 0, + 'checksum': u'20bdc35edb03a38d91b1b071afb20a3c', + 'min_disk': 0, + 'size': 2, +})] + +FAKE_NEW_IMAGE = { + "new-image": { + "id": "6a8fd89a-e636-48a4-8095-5510eab696c4", + "created_at": timeutils.parse_isotime("2013-08-02T11:30:25"), + "size": 5, + } +} + + +class FakeImages(object): + def get(self, image): + image_id = utils.get_id(image) + for i in FAKE_IMAGES: + if i.id == image_id: + return i + + raise glance_exc.HTTPNotFound() + + def list(self, **kwargs): + filters = kwargs.get('filters', {}) + if "name" in filters: + return [i for i in FAKE_IMAGES + if i.name == filters["name"]] + + return FAKE_IMAGES + + def delete(self, image): + image_id = utils.get_id(image) + image_index = 0 + for image in FAKE_IMAGES: + if image.id != image_id: + image_index += 1 + continue + del FAKE_IMAGES[image_index] + return True + raise glance_exc.HTTPNotFound() + + def create(self, **kwargs): + image = copy.deepcopy(FAKE_NEW_IMAGE[kwargs["name"]]) + image.update(kwargs) + image["updated_at"] = image["created_at"] + image.update({ + "deleted_at": False, + "deleted": False, + "status": "active", + }) + FAKE_IMAGES.append(utils.FakeObject(image)) + return copy.deepcopy(utils.FakeObject(image)) + + +class FakeGlanceClient(object): + def __init__(self, version, *args, **kwargs): + pass + + @property + def images(self): + return FakeImages() diff --git a/gceapi/tests/api/fake_keystone_client.py b/gceapi/tests/api/fake_keystone_client.py new file mode 100644 index 0000000..4da0f8d --- /dev/null +++ b/gceapi/tests/api/fake_keystone_client.py @@ -0,0 +1,38 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.tests.api import fake_request +from gceapi.tests.api import utils + + +FAKE_PROJECTS = [utils.FakeObject({ + "name": "fake_project", + "description": None, + "id": fake_request.PROJECT_ID, + "enabled": True +})] + + +class FakeTenants(object): + def list(self): + return FAKE_PROJECTS + + +class FakeKeystoneClient(object): + def __init__(self, **kwargs): + pass + + @property + def tenants(self): + return FakeTenants() diff --git a/gceapi/tests/api/fake_neutron_client.py b/gceapi/tests/api/fake_neutron_client.py new file mode 100644 index 0000000..787cb33 --- /dev/null +++ b/gceapi/tests/api/fake_neutron_client.py @@ -0,0 +1,261 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import uuid + +from gceapi.tests.api import fake_request + + +FAKE_NETWORKS = { + 'networks': [{ + u'status': u'ACTIVE', + u'subnets': [u'cd84a13b-6246-424f-9dd2-04c324ed4da0'], + u'name': u'private', + u'provider:physical_network': None, + u'admin_state_up': True, + u'tenant_id': fake_request.PROJECT_ID, + u'provider:network_type': u'local', + u'router:external': False, + u'shared': False, + u'id': u'734b9c83-3a8b-4350-8fbf-d40f571ee163', + u'provider:segmentation_id': None + }, { + u'status': u'ACTIVE', + u'subnets': [u'7a2800b8-0e66-4271-b26c-6af01dcba66f'], + u'name': u'public', + u'provider:physical_network': None, + u'admin_state_up': True, + u'tenant_id': fake_request.PROJECT_ID, + u'provider:network_type': u'local', + u'router:external': True, + u'shared': False, + u'id': u'7aa33661-33ba-4291-a2c7-44bfd59884c1', + u'provider:segmentation_id': None + }, { + u'status': u'ACTIVE', + u'subnets': [], + u'name': u'public', + u'provider:physical_network': None, + u'admin_state_up': True, + u'tenant_id': u'ae7d3f067c3c4243bb0c6ea0fa8fb6e4', + u'provider:network_type': u'local', + u'router:external': True, + u'shared': False, + u'id': u'439fa4f9-cdd7-4ee2-b3cf-5e764cf644af', + u'provider:segmentation_id': None + }, +]} + +FAKE_SUBNETS = [{ + u'subnet': { + u'name': u'', + u'enable_dhcp': True, + u'network_id': u'734b9c83-3a8b-4350-8fbf-d40f571ee163', + u'tenant_id': fake_request.PROJECT_ID, + u'dns_nameservers': [], + u'allocation_pools': [{ + u'start': u'10.0.0.2', + u'end': u'10.0.0.254' + }], + u'host_routes': [], + u'ip_version': 4, + u'gateway_ip': u'10.0.0.1', + u'cidr': u'10.0.0.0/24', + u'id': u'cd84a13b-6246-424f-9dd2-04c324ed4da0' + } +}, { + u'subnet': { + u'name': u'', + u'enable_dhcp': False, + u'network_id': u'7aa33661-33ba-4291-a2c7-44bfd59884c1', + u'tenant_id': u'ae7d3f067c3c4243bb0c6ea0fa8fb6e4', + u'dns_nameservers': [], + 
u'allocation_pools': [{ + u'start': u'172.24.4.226', + u'end': u'172.24.4.238' + }], + u'host_routes': [], + u'ip_version': 4, + u'gateway_ip': u'172.24.4.225', + u'cidr': u'172.24.4.224/28', + u'id': u'7a2800b8-0e66-4271-b26c-6af01dcba66f' + } +}] + +FAKE_ROUTERS = [{ + u'id': u'45d8de89-0e40-4d9d-977f-db3573a6e7cf', + u'tenant_id': fake_request.PROJECT_ID, + u'external_gateway_info': { + "network_id": u'503b83b5-bec0-4071-b8ba-789595c8f7b2' + }, + u'routes': [{ + u'destination': u'32.44.64.0/24', + u'nexthop': u'10.0.0.32' + }, { + u'destination': u'89.34.0.0/16', + u'nexthop': u'10.0.0.78' + }], +}] + +FAKE_PORTS = [{ + u'id': u'3e10c6ac-9fcc-492d-95fb-1b7ea93529f2', + u'tenant_id': fake_request.PROJECT_ID, + u'device_owner': u'network:router_gateway', + u'network_id': u'503b83b5-bec0-4071-b8ba-789595c8f7b2', + u'device_id': u'45d8de89-0e40-4d9d-977f-db3573a6e7cf', +}, { + u'id': u'eee5ba4f-c67e-40ec-8595-61b8e2bb715a', + u'tenant_id': fake_request.PROJECT_ID, + u'device_owner': u'network:router_interface', + u'network_id': u'734b9c83-3a8b-4350-8fbf-d40f571ee163', + u'device_id': u'45d8de89-0e40-4d9d-977f-db3573a6e7cf', + u'fixed_ips': [{ + u'subnet_id': u'cd84a13b-6246-424f-9dd2-04c324ed4da0' + }], +}] + + +FAKE_ADDRESSES = { + "floatingips": [{ + u"fixed_ip_address": u"192.168.138.196", + u"floating_ip_address": u"172.24.4.227", + u"floating_network_id": u"7aa33661-33ba-4291-a2c7-44bfd59884c1", + u"id": u"81c45d28-3699-4116-bacd-7488996c5293", + u"port_id": u"8984b23b-f945-4b1e-8eb0-7e735285c0cc", + u"router_id": u"59e96d7b-749d-433e-b592-a55ba94b935e", + u"tenant_id": fake_request.PROJECT_ID + }] +} + + +FAKE_QUOTAS = { + "quota": { + "subnet": 10, + "network": 10, + "floatingip": 50, + "security_group_rule": 100, + "security_group": 10, + "router": 10, + "port": 50 + } +} + + +FAKE_SECURITY_GROUPS = { + "security_groups": [{}, {}] +} + + +class FakeNeutronClient(object): + + def __init__(self, **kwargs): + pass + + def list_networks(self, **search_opts): + networks = [copy.deepcopy(r) for r in FAKE_NETWORKS["networks"] + if all(r.get(a) == search_opts[a] for a in search_opts)] + return {"networks": networks} + + def show_subnet(self, subnet_id): + for subnet in FAKE_SUBNETS: + if subnet["subnet"]["id"] == subnet_id: + return subnet + return None + + def list_subnets(self, retrieve_all=True, **_params): + subnets = [copy.deepcopy(s) for s in FAKE_SUBNETS + if all(s.get(a) == _params[a] for a in _params)] + return {"subnets": subnets} + + def create_network(self, body): + return {u'network': + {u'status': u'ACTIVE', + u'subnets': [], + u'name': body["network"]["name"], + u'provider:physical_network': None, + u'admin_state_up': True, + u'tenant_id': fake_request.PROJECT_ID, + u'provider:network_type': u'local', + u'router:external': False, + u'shared': False, + u'id': u'f1b1bc03-9955-4fd8-bdf9-d2ec7d2777e7', + u'provider:segmentation_id': None}} + + def create_subnet(self, body): + return {u'subnet': + {u'name': u'', + u'enable_dhcp': True, + u'network_id': u'f1b1bc03-9955-4fd8-bdf9-d2ec7d2777e7', + u'tenant_id': fake_request.PROJECT_ID, + u'dns_nameservers': [], + u'allocation_pools': [ + {u'start': u'10.100.0.2', + u'end': u'10.100.0.254'} + ], + u'host_routes': [], + u'ip_version': 4, + u'gateway_ip': u'10.100.0.1', + u'cidr': u'10.100.0.0/24', + u'id': u'9d550616-b294-4897-9eb4-7f998aa7a74e'}} + + def delete_network(self, network_id): + pass + + def list_routers(self, retrieve_all=True, **_params): + routers = [copy.deepcopy(r) for r in FAKE_ROUTERS + if all(r.get(a) == _params[a] 
for a in _params)] + return {"routers": routers} + + def show_router(self, router): + return {"router": copy.deepcopy(next(r for r in FAKE_ROUTERS + if r["id"] == router))} + + def create_router(self, body=None): + return {"router": {"id": str(uuid.uuid4())}} + + def update_router(self, router, body=None): + pass + + def add_gateway_router(self, router, body=None): + routers = self.list_routers(id=router)["routers"] + if len(routers) == 1: + return {"router": routers[0]} + + def add_interface_router(self, router, body=None): + pass + + def remove_gateway_router(self, router): + pass + + def list_ports(self, *args, **kwargs): + ports = [p for p in FAKE_PORTS + if all(p.get(a) == kwargs[a] for a in kwargs)] + return {"ports": ports} + + def list_floatingips(self, tenant_id): + return FAKE_ADDRESSES + + def create_floatingip(self, body=None): + return {"floatingip": {"id": str(uuid.uuid4()), + "floating_ip_address": "10.20.30.40"}} + + def delete_floatingip(self, floatingip): + pass + + def show_quota(self, tenant_id, **_params): + return FAKE_QUOTAS + + def list_security_groups(self, retrieve_all=True, **_params): + return FAKE_SECURITY_GROUPS diff --git a/gceapi/tests/api/fake_nova_client.py b/gceapi/tests/api/fake_nova_client.py new file mode 100644 index 0000000..ff41140 --- /dev/null +++ b/gceapi/tests/api/fake_nova_client.py @@ -0,0 +1,796 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
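+
+# Fake novaclient used by the GCE API tests. tests/api/common.py stubs
+# novaclient.Client (and the nova shell's extension discovery) with the
+# classes below, so servers, flavors, availability zones and security
+# groups all come from the FAKE_* data in this module. A minimal sketch:
+#
+#     nova = FakeNovaClient("2")
+#     server = nova.servers.get("d6957005-3ce7-4727-91d2-ae37fe5a199a")
+#     assert server.name == "i1"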
+ +import copy +import inspect +import uuid + +from novaclient import client as novaclient + +from gceapi.api import base_api +from gceapi.openstack.common.gettextutils import _ +from gceapi.tests.api import fake_request +from gceapi.tests.api import utils + + +FAKE_DETAILED_ZONES = [utils.FakeObject({ + "zoneState": { + "available": True}, + "hosts": { + "grizzly": { + "nova-conductor": { + "available": True, + "active": True, + "updated_at": "2013-12-24T14:14:47.000000"}, + "nova-consoleauth": { + "available": True, + "active": True, + "updated_at": "2013-12-24T14:14:49.000000"}, + "nova-scheduler": { + "available": True, + "active": True, + "updated_at": "2013-12-24T14:14:48.000000"}, + "nova-cert": { + "available": True, + "active": True, + "updated_at": "2013-12-24T14:14:49.000000"}}}, + "zoneName": "internal" +}), utils.FakeObject({ + "zoneState": { + "available": True}, + "hosts": { + "grizzly": { + "nova-compute": { + "available": True, + "active": True, + "updated_at": "2013-12-24T14:14:47.000000"}}}, + "zoneName": "nova" +})] + + +FAKE_SIMPLE_ZONES = [utils.FakeObject({ + "zoneState": { + "available": True}, + "hosts": None, + "zoneName": "nova" +})] + + +FAKE_FLAVORS = [utils.FakeObject({ + "name": "m1.small", + "links": [], + "ram": 2048, + "OS-FLV-DISABLED:disabled": False, + "vcpus": 1, + "swap": "", + "os-flavor-access:is_public": True, + "rxtx_factor": 1.0, + "OS-FLV-EXT-DATA:ephemeral": 0, + "disk": 20, + "id": "2" +}), utils.FakeObject({ + "name": "m1.large", + "links": [], + "ram": 8192, + "OS-FLV-DISABLED:disabled": False, + "vcpus": 4, + "swap": "", + "os-flavor-access:is_public": True, + "rxtx_factor": 1.0, + "OS-FLV-EXT-DATA:ephemeral": 870, + "disk": 80, + "id": "4" +})] + + +FAKE_SECURITY_GROUPS = [ + utils.FakeObject({ + "rules": [ + { + "from_port": None, + "ip_protocol": None, + "to_port": None, + "ip_range": {}, + "id": "3f8a140e-8d34-49c5-8cf2-5bec936b6c5c", + }, + { + "from_port": None, + "ip_protocol": None, + "to_port": None, + "ip_range": {}, + "id": "9b0006c7-5e58-4b8e-a081-f0381c44bb2f", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "2cfdbf3a-0564-4b3b-bb85-00eb8d518f0c", + "name": "default", + "description": "default", + }), + utils.FakeObject({ + "rules": [ + { + "from_port": 223, + "ip_protocol": "udp", + "to_port": 322, + "ip_range": {"cidr": "55.0.0.0/24"}, + "id": "26f6c9e4-d8ca-4a96-b752-b848716f05f5", + }, + { + "from_port": -1, + "ip_protocol": "icmp", + "to_port": -1, + "ip_range": {"cidr": "44.0.0.0/24"}, + "id": "4a2f2805-cde0-4515-9910-f2f8e77ba5f7", + }, + { + "from_port": 1234, + "ip_protocol": "tcp", + "to_port": 1234, + "ip_range": {"cidr": "44.0.0.0/24"}, + "id": "e137dae4-ea4e-401d-8941-96a207e435b9", + }, + { + "from_port": -1, + "ip_protocol": "icmp", + "to_port": -1, + "ip_range": {"cidr": "55.0.0.0/24"}, + "id": "e6a866a8-969a-41d9-b621-964a50f46381", + }, + { + "from_port": 223, + "ip_protocol": "udp", + "to_port": 322, + "ip_range": {"cidr": "44.0.0.0/24"}, + "id": "f830670e-f90f-477f-9605-e871640cf8c2", + }, + { + "from_port": 1234, + "ip_protocol": "tcp", + "to_port": 1234, + "ip_range": {"cidr": "55.0.0.0/24"}, + "id": "fbd2ada0-c5fc-4047-9558-2fb90874c8b3", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "a4ab9c5f-f0b5-4952-8e76-6a8ca0d0a402", + "name": "fake-firewall-1", + "description": "simple firewall", + }), + utils.FakeObject({ + "rules": [], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "c3859194-f111-4f24-b93b-095b056f38e2", + "name": "fake-firewall-2", + 
"description": "openstack sg w/o rules", + }), + utils.FakeObject({ + "rules": [ + { + "from_port": 1000, + "ip_protocol": "tcp", + "to_port": 2000, + "ip_range": {"cidr": "77.0.0.0/24"}, + "id": "01ecc4c4-41be-4af1-9a64-e2f866176001", + }, + { + "from_port": 1000, + "ip_protocol": "tcp", + "to_port": 2000, + "ip_range": {}, + "id": "8709247a-afd8-4673-aac4-e22d8432a31e", + }, + { + "from_port": 1000, + "ip_protocol": "tcp", + "to_port": 2000, + "ip_range": {"cidr": "78.0.0.0/24"}, + "id": "d67e8103-b32b-428b-bd20-8337b95456f1", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "b599598d-41b9-4075-a47e-019ba785c243", + "name": "fake-firewall-3", + "description": ("openstack sg with cidr & secgroup rules"), + }), + utils.FakeObject({ + "rules": [ + { + "from_port": 5678, + "ip_protocol": "tcp", + "to_port": 5678, + "ip_range": {"cidr": "66.0.0.0/24"}, + "id": "0642de5e-3c59-4c1c-8816-be6998c3c8a2", + }, + { + "from_port": 1234, + "ip_protocol": "tcp", + "to_port": 1234, + "ip_range": {"cidr": "66.0.0.0/24"}, + "id": "b1a2b159-76e5-4baf-926a-a4ce09098377", + }, + { + "from_port": 1234, + "ip_protocol": "tcp", + "to_port": 1234, + "ip_range": {"cidr": "88.0.0.0/24"}, + "id": "f7abbaab-f4fe-49fb-ac7f-bd8c49e60c61", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "fac84db7-aded-4152-a29e-5db00e052233", + "name": "fake-firewall-4", + "description": ("openstack sg too complex to translate into gce " + "rules"), + }), + utils.FakeObject({ + "rules": [ + { + "from_port": 6666, + "ip_protocol": "tcp", + "to_port": 6666, + "ip_range": {"cidr": "111.0.0.0/24"}, + "id": "634a199e-fb97-41d2-b12f-273c23a1c065" + }, + { + "from_port": 5555, + "ip_protocol": "tcp", + "to_port": 5555, + "ip_range": {"cidr": "222.0.0.0/24"}, + "id": "bf34b3b0-29aa-4abf-a686-f29d9fb342d8" + }, + { + "from_port": -1, + "ip_protocol": "icmp", + "to_port": -1, + "ip_range": {}, + "id": "fdf5dbe1-e824-46a6-a4d3-d2e37843a6d2" + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "03060521-fe0b-425f-bf33-d5061d58bae9", + "name": "fake-firewall-5", + "description": "openstack sg with combined & too complex rules", + }), + utils.FakeObject({ + "rules": [ + { + "from_port": 0, + "ip_protocol": "icmp", + "to_port": 8, + "ip_range": {"cidr": "100.0.0.0/24"}, + "id": "ae452a08-af3b-4d6a-b38e-2f4acad63331", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "d4c41e39-159c-4f96-8176-86c7b177880f", + "name": "fake-firewall-6", + "description": "openstack sg with too complex icmp rule", + }), + utils.FakeObject({ + "rules": [ + { + "from_port": -1, + "ip_protocol": "icmp", + "to_port": -1, + "ip_range": {"cidr": "110.0.0.0/24"}, + "id": "e2bc37af-529e-4ab3-8f41-358f3f9e62ab", + }, + ], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "1aaa637b-87f4-4e27-bc86-ff63d30264b2", + "name": "to-delete-firewall", + "description": "firewall to delete test", + }), +] + + +FAKE_INSTANCES = [{ + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-STS:task_state": None, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-STS:power_state": 1, + "OS-SRV-USG:launched_at": "2013-08-14T13:46:23.000000", + "OS-SRV-USG:terminated_at": None, + "addresses": { + "private": [{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:ea:ae:56", + "version": 4, + "addr": "10.0.1.3", + "OS-EXT-IPS:type": "fixed" + 
}, { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:ea:ae:56", + "version": 4, + "addr": "192.168.138.196", + "OS-EXT-IPS:type": "floating" + }] + }, + "image": None, + "flavor": { + "id": "2", + }, + "id": "d6957005-3ce7-4727-91d2-ae37fe5a199a", + "security_groups": [{ + "name": "default" + }], + "name": "i1", + "status": "ACTIVE", + "created": "2013-08-14T13:45:32Z", + "updated": "2013-08-14T13:46:23Z", + "user_id": "0ed9ed7b2004443f802142ecf364738b", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "config_drive": "", + "hostId": "cbf5e76abf66aa4363dbf17cfe0305093d903fe10389210856d85585", + "key_name": "admin", + "networks": { + u'private': [u'10.0.1.3', u'192.168.138.196'] + }, + "tenant_id": fake_request.PROJECT_ID, + "os-extended-volumes:volumes_attached": [{ + "id": "ab8829ad-eec1-44a2-8068-d7f00c53ee90" + }], + "metadata": {} +}, { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-STS:task_state": None, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-STS:power_state": 4, + "OS-SRV-USG:terminated_at": None, + "OS-SRV-USG:launched_at": "2013-08-14T13:47:11.000000", + "addresses": { + "default": [{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:ea:ae:50", + "version": 4, + "addr": "10.100.0.3", + "OS-EXT-IPS:type": "fixed" + }] + }, + "image": { + "id": "60ff30c2-64b6-4a97-9c17-322eebc8bd60", + }, + "flavor": { + "id": "4", + }, + "id": "6472359b-d46b-4629-83a9-d2ec8d99468c", + "security_groups": [{ + "name": "default" + }], + "name": "i2", + "status": "SUSPENDED", + "created": "2013-08-14T13:46:36Z", + "updated": "2013-08-14T13:47:11Z", + "user_id": "0ed9ed7b2004443f802142ecf364738b", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "config_drive": "", + "hostId": "cbf5e76abf66aa4363dbf17cfe0305093d903fe10389210856d85585", + "key_name": None, + "networks": { + "default": ["10.100.0.3"] + }, + "tenant_id": fake_request.PROJECT_ID, + "os-extended-volumes:volumes_attached": [], + "metadata": {} +}] + + +FAKE_NEW_INSTANCE = { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-SRV-ATTR:host": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "apavlov-VirtualBox", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000003", + "OS-EXT-STS:task_state": None, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-STS:power_state": 1, + "OS-SRV-USG:terminated_at": None, + "OS-SRV-USG:launched_at": "2013-08-14T13:47:11.000000", + "addresses": { + "private": [{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:ea:ae:55", + "version": 4, + "addr": "10.100.0.4", + "OS-EXT-IPS:type": "fixed" + }] + }, + "flavor": { + "id": "4", + }, + "id": "6472359b-3333-3333-3333-d2ec8d99468c", + "security_groups": [{ + "name": "default" + }], + "name": "i2", + "status": "ACTIVE", + "created": "2013-08-14T13:46:36Z", + "updated": "2013-08-14T13:47:11Z", + "user_id": "0ed9ed7b2004443f802142ecf364738b", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "config_drive": "", + "hostId": "cbf5e76abf66aa4363dbf17cfe0305093d903fe10389210856d85585", + "key_name": None, + "networks": { + "default": ["10.100.0.3"] + }, + "tenant_id": fake_request.PROJECT_ID, + "os-extended-volumes:volumes_attached": [], + "metadata": {} +} + + +FAKE_FLOATING_IPS = [utils.FakeObject({ + "instance_id": None, + "ip": "192.168.138.195", +}), utils.FakeObject({ + "instance_id": "d6957005-3ce7-4727-91d2-ae37fe5a199a", + 
"ip": "192.168.138.196", +})] + + +FAKE_LIMITS = utils.FakeObject({ + "rate": [], + "human_id": None, + "NAME_ATTR": "name", + "HUMAN_ID": False, + "absolute": [ + utils.FakeObject({"name": "maxServerMeta", "value": 128}), + utils.FakeObject({"name": "maxPersonality", "value": 5}), + utils.FakeObject({"name": "maxImageMeta", "value": 128}), + utils.FakeObject({"name": "maxPersonalitySize", "value": 10240}), + utils.FakeObject({"name": "maxTotalRAMSize", "value": 41000}), + utils.FakeObject({"name": "maxSecurityGroupRules", "value": 20}), + utils.FakeObject({"name": "maxTotalKeypairs", "value": 100}), + utils.FakeObject({"name": "maxSecurityGroups", "value": 10}), + utils.FakeObject({"name": "maxTotalFloatingIps", "value": 10}), + utils.FakeObject({"name": "maxTotalInstances", "value": 10}), + utils.FakeObject({"name": "maxTotalCores", "value": 17}), + utils.FakeObject({"name": "totalRAMUsed", "value": 512}), + utils.FakeObject({"name": "totalFloatingIpsUsed", "value": 3}), + utils.FakeObject({"name": "totalInstancesUsed", "value": 4}), + utils.FakeObject({"name": "totalSecurityGroupsUsed", "value": 2}), + utils.FakeObject({"name": "totalCoresUsed", "value": 1}), + ] +}) + + +class FakeClassWithFind(object): + def list(self): + pass + + def find(self, **kwargs): + matches = self.findall(**kwargs) + num_matches = len(matches) + if num_matches == 0: + msg = "No %s matching %s." % (self.__class__.__name__, kwargs) + raise novaclient.exceptions.NotFound(404, msg) + elif num_matches > 1: + raise novaclient.exceptions.NoUniqueMatch + else: + return matches[0] + + def findall(self, **kwargs): + found = [] + searches = kwargs.items() + + detailed = True + list_kwargs = {} + + list_argspec = inspect.getargspec(self.list) + if "detailed" in list_argspec.args: + detailed = ("human_id" not in kwargs and + "name" not in kwargs and + "display_name" not in kwargs) + list_kwargs["detailed"] = detailed + + if "is_public" in list_argspec.args and "is_public" in kwargs: + is_public = kwargs["is_public"] + list_kwargs["is_public"] = is_public + if is_public is None: + tmp_kwargs = kwargs.copy() + del tmp_kwargs["is_public"] + searches = tmp_kwargs.items() + + listing = self.list(**list_kwargs) + + for obj in listing: + try: + if all(getattr(obj, attr) == value + for (attr, value) in searches): + if detailed: + found.append(obj) + else: + found.append(self.get(obj.id)) + except AttributeError: + continue + + return found + + +class FakeAvailabilityZones(object): + def list(self, detailed=True): + if detailed: + return FAKE_DETAILED_ZONES + return FAKE_SIMPLE_ZONES + + +class FakeFlavors(FakeClassWithFind): + def list(self, detailed=True, is_public=True): + return FAKE_FLAVORS + + def get(self, flavor): + flavor_id = utils.get_id(flavor) + for flavor in FAKE_FLAVORS: + if flavor.id == flavor_id: + return flavor + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + +class FakeKeypairs(object): + def get(self, keypair): + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + def create(self, name, public_key=None): + pass + + def delete(self, key): + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + def list(self): + return [] + + +class FakeServer(utils.FakeObject): + + _manager = None + + def __init__(self, manager, obj_dict): + super(FakeServer, self).__init__(obj_dict) + self._manager = manager + + def reboot(self, reboot_type): + self._manager.reboot(self, reboot_type) + + def add_security_group(self, 
security_group): + pass + + def remove_security_group(self, security_group): + pass + + def delete(self): + self._manager.delete(self) + + def add_floating_ip(self, address, fixed_address=None): + pass + + def remove_floating_ip(self, address): + pass + + +class FakeServers(object): + _fake_instances = None + + def __init__(self): + self._fake_instances = [FakeServer(self, i) + for i in FAKE_INSTANCES] + + def get(self, server): + server_id = utils.get_id(server) + for server in self._fake_instances: + if server.id == server_id: + return server + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + def list(self, detailed=True, search_opts=None, + marker=None, limit=None): + result = self._fake_instances + if search_opts and "name" in search_opts: + name = search_opts["name"] + result = [i for i in result if i.name == name] + if search_opts and "fixed_ip" in search_opts: + name = search_opts["fixed_ip"] + filtered = [] + for i in result: + for network in i.addresses: + for address in i.addresses[network]: + atype = address["OS-EXT-IPS:type"] + if atype == "fixed": + filtered.append(i) + break + + result = filtered + + return result + + def create(self, name, image, flavor, meta=None, files=None, + reservation_id=None, min_count=None, + max_count=None, security_groups=None, userdata=None, + key_name=None, availability_zone=None, + block_device_mapping=None, block_device_mapping_v2=None, + nics=None, scheduler_hints=None, + config_drive=None, disk_config=None, **kwargs): + instance = copy.deepcopy(FakeServer(self, FAKE_NEW_INSTANCE)) + instance.name = name + self._fake_instances.append(instance) + return instance + + def add_floating_ip(self, server, address, fixed_address=None): + self.get(server) + + def remove_floating_ip(self, server, address): + self.get(server) + + def delete(self, server): + self.get(server) + + def reboot(self, server, reboot_type): + if reboot_type != "HARD": + msg = _("Argument 'type' for reboot is not HARD or SOFT") + raise novaclient.exceptions.BadRequest(message=msg) + self.get(server) + + +class FakeSecurityGroups(FakeClassWithFind): + _secgroups = FAKE_SECURITY_GROUPS + + def list(self): + return self._secgroups + + def get(self, sg_id): + secgroup = next((secgroup + for secgroup in self._secgroups + if secgroup.id == sg_id), None) + if secgroup is None: + raise novaclient.exceptions.NotFound( + 404, "Security group %s not found" % sg_id) + return secgroup + + def create(self, name, description): + secgroup = utils.FakeObject({ + "name": name, + "description": description, + "rules": [], + "project_id": "6678c02984ce4df8b26912db30481637", + "id": "5707a6f0-799d-4739-8740-3efc73f122aa", + }) + self._secgroups = copy.deepcopy(self._secgroups) + self._secgroups.append(secgroup) + return secgroup + + def delete(self, security_group): + pass + + def add_rule(self, sg_id, ip_protocol, from_port, to_port, cidr): + secgroup = self.get(sg_id) + rule = { + "id": uuid.uuid4(), + "ip_protocol": ip_protocol, + "from_port": from_port, + "to_port": to_port, + "ip_range": {"cidr": cidr}, + } + secgroup.rules.append(rule) + + +class FakeSecurityGroupRules(object): + def __init__(self, nova_client): + self.security_groups = nova_client.security_groups + + def create(self, sg_id, ip_protocol, from_port, to_port, cidr): + self.security_groups.add_rule(sg_id, ip_protocol, from_port, + to_port, cidr) + + +class FakeFloatingIps(object): + def list(self): + return FAKE_FLOATING_IPS + + +class FakeLimits(object): + def get(self, tenant_id): + return 
FAKE_LIMITS + + +class FakeVolumes(object): + def get_server_volumes(self, instance_id): + return [] + + def create_server_volume(self, instance_id, volume_id, device): + instance = FakeServers().get(instance_id) + volumes = getattr(instance, "os-extended-volumes:volumes_attached") + volumes = [v["id"] for v in volumes] + if volume_id in volumes: + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + def delete_server_volume(self, instance_id, volume_id): + instance = FakeServers().get(instance_id) + volumes = getattr(instance, "os-extended-volumes:volumes_attached") + volumes = [v["id"] for v in volumes] + if volume_id not in volumes: + raise novaclient.exceptions.NotFound( + novaclient.exceptions.NotFound.http_status) + + +class FakeNovaClient(object): + + KIND = "fake_novaclient" + __metaclass__ = base_api.Singleton + + def __init__(self, version, *args, **kwargs): + self._servers = None + self._security_group = None + + @property + def client(self): + return self + + @property + def availability_zones(self): + return FakeAvailabilityZones() + + @property + def flavors(self): + return FakeFlavors() + + @property + def keypairs(self): + return FakeKeypairs() + + @property + def servers(self): + if self._servers is None: + self._servers = FakeServers() + return self._servers + + @property + def security_groups(self): + if self._security_group is None: + self._security_group = FakeSecurityGroups() + return self._security_group + + @property + def security_group_rules(self): + return FakeSecurityGroupRules(self) + + @property + def floating_ips(self): + return FakeFloatingIps() + + @property + def limits(self): + return FakeLimits() + + @property + def volumes(self): + return FakeVolumes() + + +def fake_discover_extensions(self, version): + return list() diff --git a/gceapi/tests/api/fake_request.py b/gceapi/tests/api/fake_request.py new file mode 100644 index 0000000..7b19b24 --- /dev/null +++ b/gceapi/tests/api/fake_request.py @@ -0,0 +1,84 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
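+
+# Builds pre-authenticated requests for the GCE API controllers. Each
+# blank request is given a fake admin RequestContext for PROJECT_ID and
+# the service catalog below, so no real keystone token is involved:
+#
+#     req = HTTPRequest.blank("/fake_project/regions/nova/addresses")
+#     response = req.get_response(gceapi.api.APIRouter())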
+ +from gceapi import context +from gceapi import wsgi_ext as os_wsgi + + +PROJECT_ID = "4a5cc7d8893544a9babb3b890227d75e" + + +FAKE_SERVICE_CATALOG = [{ + u'endpoints': [{ + u'adminURL': u'http://192.168.137.21:8774/v2/' + PROJECT_ID, + u'region': u'RegionOne', + u'id': u'81a8b36abc5f4945bbd1269be0423012', + u'internalURL': u'http://192.168.137.21:8774/v2/' + PROJECT_ID, + u'publicURL': u'http://192.168.137.21:8774/v2/' + PROJECT_ID}], + u'endpoints_links': [], + u'type': u'compute', + u'name': u'nova' +}, { + u'endpoints': [{ + u'adminURL': u'http://192.168.137.21:9696/', + u'region': u'RegionOne', + u'id': u'10a0fc598a5741c390f0d6560a89fced', + u'internalURL': u'http://192.168.137.21:9696/', + u'publicURL': u'http://192.168.137.21:9696/'}], + u'endpoints_links': [], + u'type': u'network', + u'name': u'neutron' +}, { + u'endpoints': [{ + u'adminURL': u'http://192.168.137.21:9292', + u'region': u'RegionOne', + u'id': u'39643060448c4c089535fce07f2d2aa4', + u'internalURL': u'http://192.168.137.21:9292', + u'publicURL': u'http://192.168.137.21:9292'}], + u'endpoints_links': [], + u'type': u'image', + u'name': u'glance' +}, { + u'endpoints': [{ + u'adminURL': u'http://192.168.137.21:8776/v1/' + PROJECT_ID, + u'region': u'RegionOne', + u'id': u'494bd5333aed467092316e03b1163139', + u'internalURL': u'http://192.168.137.21:8776/v1/' + PROJECT_ID, + u'publicURL': u'http://192.168.137.21:8776/v1/' + PROJECT_ID}], + u'endpoints_links': [], + u'type': u'volume', + u'name': u'cinder' +}] + + +class HTTPRequest(os_wsgi.Request): + + @classmethod + def blank(cls, url, has_body=False, *args, **kwargs): + kwargs['base_url'] = 'http://localhost/compute/v1beta15/projects' + if has_body: + kwargs.setdefault("content_type", "application/json") + out = os_wsgi.Request.blank(url, *args, **kwargs) + user_id = 'c2bc8099-8861-46ab-a416-99f06bb89198' + user_name = 'fake_user' + project_id = PROJECT_ID + project_name = 'fake_project' + fake_context = context.RequestContext(user_id, + project_id, + user_name=user_name, + project_name=project_name, + is_admin=True) + fake_context.service_catalog = FAKE_SERVICE_CATALOG + out.environ['gceapi.context'] = fake_context + return out diff --git a/gceapi/tests/api/test_addresses.py b/gceapi/tests/api/test_addresses.py new file mode 100644 index 0000000..a911270 --- /dev/null +++ b/gceapi/tests/api/test_addresses.py @@ -0,0 +1,147 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
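+
+# Tests for the GCE addresses controller. Requests are issued through
+# fake_request.HTTPRequest against APIRouter with every OpenStack client
+# replaced by a fake, so the expected JSON documents below can be
+# compared verbatim against the controller output.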
+ +from gceapi.api import addresses +from gceapi.tests.api import common + +EXPECTED_ADDRESSES = [{ + "kind": "compute#address", + "id": "2729532145628373701", + "creationTimestamp": "", + "status": "IN USE", + "region": "http://localhost/compute/v1beta15/projects/" + "fake_project/regions/nova", + "name": "address-172-24-4-227", + "description": "", + "address": "172.24.4.227", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/regions/nova/addresses/address-172-24-4-227", + "users": ["http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i1"] +}] + + +class AddressesTest(common.GCEControllerTest): + + def setUp(self): + super(AddressesTest, self).setUp() + self.controller = addresses.Controller() + + def test_get_address_by_invalid_name(self): + response = self.request_gce("/fake_project/regions/" + "nova/addresses/fake") + self.assertEqual(404, response.status_int) + + def test_get_address_by_name(self): + response = self.request_gce("/fake_project/regions/" + "nova/addresses/address-172-24-4-227") + + self.assertEqual(200, response.status_int) + self.assertEqual(response.json_body, EXPECTED_ADDRESSES[0]) + + def test_get_address_list_filtered(self): + response = self.request_gce("/fake_project/regions/nova/addresses" + "?filter=name+eq+address-172-24-4-227") + expected = { + "kind": "compute#addressList", + "id": "projects/fake_project/regions/nova/addresses", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/regions/nova/addresses", + "items": [EXPECTED_ADDRESSES[0]] + } + + self.assertEqual(response.json_body, expected) + + def test_get_address_list(self): + response = self.request_gce("/fake_project/regions/nova/addresses") + expected = { + "kind": "compute#addressList", + "id": "projects/fake_project/regions/nova/addresses", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/regions/nova/addresses", + "items": EXPECTED_ADDRESSES + } + + self.assertEqual(response.json_body, expected) + + def test_get_address_aggregated_list_filtered(self): + response = self.request_gce("/fake_project/aggregated/addresses" + "?filter=name+eq+address-172-24-4-227") + + expected = { + "kind": "compute#addressAggregatedList", + "id": "projects/fake_project/aggregated/addresses", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/aggregated/addresses", + "items": { + "regions/nova": { + "addresses": [EXPECTED_ADDRESSES[0]] + }, + } + } + + self.assertEqual(response.json_body, expected) + + def test_get_address_aggregated_list(self): + response = self.request_gce("/fake_project/aggregated/addresses") + + expected = { + "kind": "compute#addressAggregatedList", + "id": "projects/fake_project/aggregated/addresses", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/aggregated/addresses", + "items": { + "regions/nova": { + "addresses": EXPECTED_ADDRESSES + }, + } + } + + self.assertEqual(response.json_body, expected) + + def test_delete_address_with_invalid_name(self): + response = self.request_gce("/fake_project/regions/nova" + "/addresses/fake-address", method="DELETE") + self.assertEqual(404, response.status_int) + + def test_delete_address(self): + response = self.request_gce( + "/fake_project/regions/nova/addresses/address-172-24-4-227", + method="DELETE") + expected = { + "operationType": "delete", + "targetId": "2729532145628373701", + "targetLink": "http://localhost/compute/v1beta15/projects/" + 
"fake_project/regions/nova/addresses/address-172-24-4-227", + } + expected.update(common.COMMON_REGION_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_create_address(self): + request_body = { + "name": "fake-address", + } + response = self.request_gce("/fake_project/regions/nova/addresses", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "operationType": "insert", + "targetId": "5571612063911429008", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/regions/nova/addresses/fake-address", + } + expected.update(common.COMMON_REGION_FINISHED_OPERATION) + self.assertDictEqual(expected, response.json_body) diff --git a/gceapi/tests/api/test_disks.py b/gceapi/tests/api/test_disks.py new file mode 100644 index 0000000..39f0782 --- /dev/null +++ b/gceapi/tests/api/test_disks.py @@ -0,0 +1,256 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from gceapi.tests.api import common + + +EXPECTED_DISK_1 = { + "status": "READY", + "sourceSnapshot": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/snapshots/fake-snapshot", + "kind": "compute#disk", + "name": "fake-disk-1", + "sizeGb": 2, + "sourceSnapshotId": "991cda9c-28bd-420f-8432-f5159def85d6", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T12:35:22Z", + "id": "9202387718698825408", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-1", + "description": "fake disk from snapshot", +} +EXPECTED_DISK_2 = { + "status": "READY", + "sizeGb": 1, + "kind": "compute#disk", + "name": "fake-disk-2", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T12:19:35Z", + "id": "9202387718698825405", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-2", + "description": "", + "sourceImage": "http://localhost/compute/v1beta15/projects/fake_project" + "/global/images/fake-image-1", + "sourceImageId": "5721131091780319465", +} +EXPECTED_DISK_3 = { + "status": "READY", + "sizeGb": 3, + "kind": "compute#disk", + "name": "fake-disk-3", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T11:57:44Z", + "id": "9202387718698825406", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-3", + "description": "full description of disk", +} +NEW_DISK = { + "status": "READY", + "sizeGb": 15, + "kind": "compute#disk", + "name": "new-disk", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T15:00:22Z", + "id": "5151144363316117590", + "selfLink": "http://localhost/compute/v1beta15/projects/" + 
"fake_project/zones/nova/disks/new-disk", + "description": None, +} +NEW_IMAGE_DISK = { + "status": "READY", + "kind": "compute#disk", + "name": "new-image-disk", + "sizeGb": 1, + "sourceImage": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/fake-image-2", + "sourceImageId": "5721131091780319468", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T15:56:00Z", + "id": "3094468787955188924", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/new-image-disk", + "description": "disk created with image", +} +NEW_SN_DISK = { + "status": "CREATING", + "sourceSnapshot": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/snapshots/fake-snapshot", + "kind": "compute#disk", + "name": "new-sn-disk", + "sizeGb": 25, + "sourceSnapshotId": "991cda9c-28bd-420f-8432-f5159def85d6", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "creationTimestamp": "2013-08-14T16:43:59Z", + "id": "5322910296130766655", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/new-sn-disk", + "description": "disk created from snapshot" +} + + +class DisksControllerTest(common.GCEControllerTest): + def setUp(self): + super(DisksControllerTest, self).setUp() + + def test_get_disk_list_filterd(self): + response = self.request_gce("/fake_project/aggregated/disks" + "?filter=name+eq+fake-disk-3") + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + self.assertIn("zones/nova", response_body["items"]) + expected_common = { + "kind": "compute#diskAggregatedList", + "id": "projects/fake_project/aggregated/disks", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/aggregated/disks", + "items": { + "zones/nova": {}, + }, + } + response_disks = response_body["items"]["zones/nova"].pop("disks") + self.assertDictEqual(expected_common, response_body) + self.assertDictInListBySelfLink(EXPECTED_DISK_3, response_disks) + + def test_get_disk_list(self): + response = self.request_gce("/fake_project/aggregated/disks") + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + self.assertIn("zones/nova", response_body["items"]) + expected_common = { + "kind": "compute#diskAggregatedList", + "id": "projects/fake_project/aggregated/disks", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/aggregated/disks", + "items": { + "zones/nova": {}, + }, + } + response_disks = response_body["items"]["zones/nova"].pop("disks") + self.assertDictEqual(expected_common, response_body) + self.assertDictInListBySelfLink(EXPECTED_DISK_1, response_disks) + self.assertDictInListBySelfLink(EXPECTED_DISK_2, response_disks) + self.assertDictInListBySelfLink(EXPECTED_DISK_3, response_disks) + + def test_get_disk_by_name(self): + response = self.request_gce( + "/fake_project/zones/nova/disks/fake-disk-1") + self.assertEqual(200, response.status_int) + self.assertDictEqual(EXPECTED_DISK_1, response.json_body) + + def test_get_disk_by_invalid_name(self): + response = self.request_gce( + "/fake_project/zones/nova/disks/fake-disk") + self.assertEqual(404, response.status_int) + + def test_create_disk(self): + request_body = { + "name": "new-disk", + "sizeGb": "15", + } + response = self.request_gce("/fake_project/zones/nova/disks", + 
method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "targetId": "5151144363316117590", + "operationType": "insert", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/new-disk", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertDictEqual(expected, response.json_body) + response = self.request_gce( + "/fake_project/zones/nova/disks/new-disk") + self.assertEqual(200, response.status_int) + self.assertDictEqual(NEW_DISK, response.json_body) + + def test_create_disk_by_image(self): + request_body = { + "name": "new-image-disk", + "description": "disk created with image" + } + response = self.request_gce( + "/fake_project/zones/nova/disks?sourceImage=fake-image-2", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "operationType": "insert", + "targetId": "3094468787955188924", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/new-image-disk", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertDictEqual(expected, response.json_body) + response = self.request_gce( + "/fake_project/zones/nova/disks/new-image-disk") + self.assertEqual(200, response.status_int) + self.assertDictEqual(NEW_IMAGE_DISK, response.json_body) + + def test_create_disk_by_snapshot(self): + request_body = { + "name": "new-sn-disk", + "sizeGb": "25", + "sourceSnapshot": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/snapshots/fake-snapshot", + "description": "disk created from snapshot" + } + response = self.request_gce( + "/fake_project/zones/nova/disks", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "operationType": "insert", + "targetId": "5322910296130766655", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/new-sn-disk", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertDictEqual(expected, response.json_body) + response = self.request_gce( + "/fake_project/zones/nova/disks/new-sn-disk") + self.assertEqual(200, response.status_int) + self.assertDictEqual(NEW_SN_DISK, response.json_body) + + def test_delete_disk(self): + response = self.request_gce( + "/fake_project/zones/nova/disks/disk-to-delete", + method="DELETE") + expected = { + "operationType": "delete", + "targetId": "7382604722864765133", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/disk-to-delete", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_delete_disk_with_invalid_name(self): + response = self.request_gce('/fake_project/zones/nova/disks/fake-disk', + method="DELETE") + self.assertEqual(404, response.status_int) diff --git a/gceapi/tests/api/test_fields.py b/gceapi/tests/api/test_fields.py new file mode 100644 index 0000000..04e9ad7 --- /dev/null +++ b/gceapi/tests/api/test_fields.py @@ -0,0 +1,65 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import utils +from gceapi import test + + +class FieldsTest(test.TestCase): + """Test for parsing fields params.""" + + def setUp(self): + """Run before each test.""" + super(FieldsTest, self).setUp() + + def test_apply_template(self): + fields1 = 'one,two,three(smth,else/one)' + fields2 = 'one/smth,five/smth' + wrongfields1 = 'zero,one/smth' + wrongfields2 = 'fgdfds)9342' + dct = {'one': {'smth': 1, + 'else': 0}, + 'two': 2, + 'three': [{'smth': 3, + 'another': 'string', + 'else': {'one': 1}}], + 'four': 4, + 'five': {'smth': 5} + } + + expected1 = {'one': {'smth': 1, + 'else': 0}, + 'two': 2, + 'three': [{'smth': 3, + 'else': {'one': 1}}] + } + + expected2 = {'one': {'smth': 1}, + 'five': {'smth': 5} + } + + res1 = utils.apply_template(fields1, dct) + res2 = utils.apply_template(fields2, dct) + self.assertEqual(res1, expected1) + self.assertEqual(res2, expected2) + + self.assertRaises(ValueError, utils.apply_template, wrongfields1, dct) + self.assertRaises(ValueError, utils.apply_template, wrongfields2, dct) + + def test_split_by_comma(self): + string = 'bla,bla,smth/else,another(bla,bla/bla),yet/bla' + expected = ['bla', 'bla', 'smth/else', 'another(bla,bla/bla)', + 'yet/bla'] + res = utils.split_by_comma(string) + self.assertEqual(res, expected) diff --git a/gceapi/tests/api/test_firewalls.py b/gceapi/tests/api/test_firewalls.py new file mode 100644 index 0000000..ec3a385 --- /dev/null +++ b/gceapi/tests/api/test_firewalls.py @@ -0,0 +1,291 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
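The projection grammar exercised by FieldsTest above mirrors GCE partial responses: comma-separated field paths, '/' for nesting, and parentheses for sub-selections. Two concrete calls whose results follow directly from the expectations in that test (unknown fields raise ValueError, as the wrongfields cases show):

from gceapi.api import utils

dct = {'one': {'smth': 1, 'else': 0}, 'five': {'smth': 5}}
utils.apply_template('one/smth,five/smth', dct)
# -> {'one': {'smth': 1}, 'five': {'smth': 5}}
utils.apply_template('zero', dct)  # unknown field -> ValueError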
+ +import copy + +import gceapi.context +from gceapi.tests.api import common + + +DEFAULT_FIREWALL = { + "kind": "compute#firewall", + "name": "default", + "creationTimestamp": "", + "sourceRanges": [ + "0.0.0.0/0", + ], + "allowed": [], + "id": "1000226411104458008", + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/default"), + "description": "[+]default", +} +FAKE_FIREWALL_1 = { + "kind": "compute#firewall", + "name": "fake-firewall-1", + "creationTimestamp": "2013-12-25T09:01:00.396957Z", + "sourceRanges": [ + "55.0.0.0/24", + "44.0.0.0/24", + ], + "allowed": [ + { + "IPProtocol": "udp", + "ports": [ + "223-322", + ], + }, + { + "IPProtocol": "icmp", + }, + { + "IPProtocol": "tcp", + "ports": [ + "1234", + ], + }, + ], + "id": "5486539087303205175", + "network": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/private"), + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-1"), + "description": "simple firewall", +} +FAKE_FIREWALL_2 = { + "kind": "compute#firewall", + "name": "fake-firewall-2", + "creationTimestamp": "", + "sourceRanges": [ + "0.0.0.0/0", + ], + "allowed": [], + "id": "5486539087303205174", + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-2"), + "description": "openstack sg w/o rules", +} +FAKE_FIREWALL_3 = { + "kind": "compute#firewall", + "name": "fake-firewall-3", + "creationTimestamp": "2013-12-25T09:02:00.396957Z", + "sourceRanges": [ + "77.0.0.0/24", + "78.0.0.0/24", + ], + "allowed": [ + { + "IPProtocol": "tcp", + "ports": [ + "1000-2000", + ], + }, + ], + "id": "5486539087303205173", + "network": ("http://localhost/compute/v1beta15/projects/" + "fake_project/global/networks/private"), + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-3"), + "description": "[+]openstack sg with cidr & secgroup rules", +} +FAKE_FIREWALL_4 = { + "kind": "compute#firewall", + "name": "fake-firewall-4", + "creationTimestamp": "", + "sourceRanges": [], + "allowed": [], + "id": "5486539087303205172", + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-4"), + "description": "[*]openstack sg too complex to translate into gce rules", +} +FAKE_FIREWALL_5 = { + "kind": "compute#firewall", + "name": "fake-firewall-5", + "creationTimestamp": "", + "sourceRanges": [], + "allowed": [], + "id": "5486539087303205171", + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-5"), + "description": "[*][+]openstack sg with combined & too complex rules", +} +FAKE_FIREWALL_6 = { + "kind": "compute#firewall", + "name": "fake-firewall-6", + "creationTimestamp": "", + "sourceRanges": [], + "allowed": [], + "id": "5486539087303205170", + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/fake-firewall-6"), + "description": "[*]openstack sg with too complex icmp rule", +} +NEW_FIREWALL = { + "kind": "compute#firewall", + "name": "new-firewall", + "creationTimestamp": "2013-12-25T09:03:00.396957Z", + "sourceRanges": [ + "42.0.0.0/24", + "41.0.0.0/24", + ], + "allowed": [ + { + "IPProtocol": "udp", + "ports": [ + "5000-6000", "6666", + ], + }, + { + "IPProtocol": "icmp", + }, + { + "IPProtocol": "tcp", + "ports": [ + "80", "8080", + ], + }, + ], + "id": "8518771050733866051", + "network": 
("http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/private"), + "selfLink": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/firewalls/new-firewall"), + "description": "new fake firewall", +} + + +class FirewallsControllerTest(common.GCEControllerTest): + + def setUp(self): + super(FirewallsControllerTest, self).setUp() + + def test_list_firewalls_filtered(self): + response = self.request_gce("/fake_project/global/firewalls" + "?filter=name+eq+fake-firewall-5") + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#firewallList", + "id": "projects/fake_project/global/firewalls", + "selfLink": ("http://localhost/compute/v1beta15/projects/" + "fake_project/global/firewalls") + } + response_firewalls = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertDictInListBySelfLink(FAKE_FIREWALL_5, response_firewalls) + + def test_list_firewalls(self): + response = self.request_gce("/fake_project/global/firewalls") + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#firewallList", + "id": "projects/fake_project/global/firewalls", + "selfLink": ("http://localhost/compute/v1beta15/projects/" + "fake_project/global/firewalls") + } + response_firewalls = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertDictInListBySelfLink(DEFAULT_FIREWALL, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_1, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_2, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_3, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_4, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_5, response_firewalls) + self.assertDictInListBySelfLink(FAKE_FIREWALL_6, response_firewalls) + + def test_get_firewall(self): + response = self.request_gce( + "/fake_project/global/firewalls/fake-firewall-1") + self.assertEqual(200, response.status_int) + self.assertDictEqual(FAKE_FIREWALL_1, response.json_body) + + def test_create_firewall(self): + self.add_to_instance_was_called = False + + def add_to_instance(dummy, context, instance, sg_id): + self.assertIsInstance(context, gceapi.context.RequestContext) + self.assertEqual("6472359b-d46b-4629-83a9-d2ec8d99468c", + instance["uuid"]) + self.assertEqual("5707a6f0-799d-4739-8740-3efc73f122aa", sg_id) + self.add_to_instance_was_called = True + + request_body = { + "network": ("http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/private"), + "rules": [], + "description": "new fake firewall", + "sourceRanges": ["41.0.0.0/24", "42.0.0.0/24"], + "allowed": [ + {"IPProtocol": "icmp"}, + {"IPProtocol": "tcp", "ports": ["80", "8080"]}, + {"IPProtocol": "udp", "ports": ["5000-6000", "6666"]}, + ], + "name": "new-firewall", + } + response = self.request_gce("/fake_project/global/firewalls", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "operationType": "insert", + "targetId": "8518771050733866051", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/firewalls/new-firewall", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertDictEqual(expected, response.json_body) + # 
TODO(apavlov): reanimate this + #self.assertTrue(self.add_to_instance_was_called) + #response = self.request_gce( + # "/fake_project/global/firewalls/new-firewall") + #self.assertEqual(200, response.status_int) + #self.assertDictEqual(NEW_FIREWALL, response.json_body) + + def test_delete_firewall(self): + self.remove_from_instance_was_called = False + + def remove_from_instance(dummy, context, instance, sg_name): + self.assertIsInstance(context, gceapi.context.RequestContext) + self.assertEqual("6472359b-d46b-4629-83a9-d2ec8d99468c", + instance["uuid"]) + self.assertEqual("1aaa637b-87f4-4e27-bc86-ff63d30264b2", sg_name) + self.remove_from_instance_was_called = True + + response = self.request_gce( + "/fake_project/global/firewalls/to-delete-firewall", + method="DELETE") + self.assertEqual(200, response.status_int) + expected = { + "operationType": "delete", + "targetId": "7536069615864894672", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/firewalls/to-delete-firewall", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertDictEqual(expected, response.json_body) + # TODO(apavlov): reanimate this + #self.assertTrue(self.remove_from_instance_was_called) + #response = self.request_gce( + # "/fake_project/global/firewalls/to-delete-firewall") + #self.assertEqual(404, response.status_int) + + def test_delete_firewall_nonexistent(self): + response = self.request_gce( + "/fake_project/global/firewalls/fake-firewall", + method="DELETE") + self.assertEqual(404, response.status_int) diff --git a/gceapi/tests/api/test_images.py b/gceapi/tests/api/test_images.py new file mode 100644 index 0000000..ac132fd --- /dev/null +++ b/gceapi/tests/api/test_images.py @@ -0,0 +1,167 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
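Most list tests in this patch pass GCE filter expressions of the form filter=<field>+eq+<literal>, where '+' is a URL-encoded space. Only this simple equality form is needed by these tests; a hypothetical matcher for that subset (illustrative only, not the patch's actual parser):

import re

def matches_filter(expression, resource):
    # 'name eq fake-image-2' keeps resources whose field equals the literal.
    match = re.match(r'^(\w+) eq (.+)$', expression)
    if match is None:
        raise ValueError('unsupported filter: %s' % expression)
    field, literal = match.groups()
    return str(resource.get(field)) == literal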
+ +import copy + +from gceapi.tests.api import common + +EXPECTED_IMAGE_1 = { + "kind": "compute#image", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/fake-image-1", + "id": "5721131091780319465", + "creationTimestamp": "2013-08-01T11:30:25Z", + "name": "fake-image-1", + "description": "christmas-tree", + "sourceType": "RAW", + "rawDisk": { + "containerType": "TAR", + "source": "http://fake_url/fake_resource", + }, + "status": "READY", + "archiveSizeBytes": 1 +} +EXPECTED_IMAGE_2 = { + "kind": "compute#image", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/fake-image-2", + "id": "5721131091780319468", + "creationTimestamp": "2013-08-01T11:30:25Z", + "name": "fake-image-2", + "description": "", + "sourceType": "RAW", + "rawDisk": { + "containerType": "TAR", + "source": "", + }, + "status": "READY", + "archiveSizeBytes": 2 +} +NEW_IMAGE = { + "kind": "compute#image", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/new-image", + "id": "7252430471608041491", + "creationTimestamp": "2013-08-02T11:30:25Z", + "name": "new-image", + "description": "new-description", + "sourceType": "RAW", + "rawDisk": { + "containerType": "TAR", + "source": "http://example.com/image.tar", + }, + "status": "READY", + "archiveSizeBytes": 5, +} + + +class ImagesControllerTest(common.GCEControllerTest): + """ + Test of the GCE API /images application controller w/Glance. + """ + + def setUp(self): + """Run before each test.""" + super(ImagesControllerTest, self).setUp() + + def test_get_image_list_filtered(self): + response = self.request_gce("/fake_project/global/images" + "?filter=name+eq+fake-image-2") + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#imageList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images", + "id": "projects/fake_project/global/images", + } + response_images = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertDictEqual(EXPECTED_IMAGE_2, response_images[0]) + self.assertIn(EXPECTED_IMAGE_2, response_images) + + def test_get_image_list(self): + response = self.request_gce('/fake_project/global/images') + self.assertEqual(200, response.status_int) + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#imageList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images", + "id": "projects/fake_project/global/images", + } + response_images = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertIn(EXPECTED_IMAGE_1, response_images) + self.assertIn(EXPECTED_IMAGE_2, response_images) + + def test_get_image(self): + response = self.request_gce("/fake_project/global/images/fake-image-1") + self.assertEqual(200, response.status_int) + self.assertDictEqual(EXPECTED_IMAGE_1, response.json_body) + + def test_get_nonexistent_image(self): + response = self.request_gce('/fake_project/global/images/fake-image') + self.assertEqual(404, response.status_int) + + def test_create_image(self): + request_body = { + 'name': 'new-image', + 'rawDisk': { + 'containerType': 'TAR', + 'source': 'http://example.com/image.tar', + }, + 'sourceType': 'RAW', + 'description': 'new-description' + } + response = 
self.request_gce('/fake_project/global/images', + method="POST", + body=request_body) + expected = { + "operationType": "insert", + "targetId": "7252430471608041491", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/new-image", + } + expected.update(common.COMMON_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + response = self.request_gce('/fake_project/global/images/new-image') + self.assertEqual(200, response.status_int) + self.assertDictEqual(NEW_IMAGE, response.json_body) + + def test_delete_image(self): + response = self.request_gce( + '/fake_project/global/images/image-to-delete', method='DELETE') + expected = { + "operationType": "delete", + "targetId": "6451912522928418272", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/images/image-to-delete", + } + expected.update(common.COMMON_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + response = self.request_gce( + '/fake_project/global/images/image-to-delete') + self.assertEqual(404, response.status_int) + + def test_delete_nonexistent_image(self): + response = self.request_gce('/fake_project/global/images/fake-image', + method='DELETE') + self.assertEqual(404, response.status_int) diff --git a/gceapi/tests/api/test_instances.py b/gceapi/tests/api/test_instances.py new file mode 100644 index 0000000..477e122 --- /dev/null +++ b/gceapi/tests/api/test_instances.py @@ -0,0 +1,310 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
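For context on the image tests above: a GCE image insert carries name, sourceType and rawDisk.source. A hypothetical translation onto a Glance v1 create of the same era, shown only to illustrate which fields matter (the patch's real mapping lives in gceapi/api/image_api.py and may differ):

request_body = {
    'name': 'new-image',
    'sourceType': 'RAW',
    'rawDisk': {'containerType': 'TAR',
                'source': 'http://example.com/image.tar'},
}
glance_image = {
    'name': request_body['name'],
    'disk_format': request_body['sourceType'].lower(),  # 'raw'
    'container_format': 'bare',  # assumed default
    'copy_from': request_body['rawDisk']['source'],
}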
+ +import copy + +from gceapi.tests.api import common + +EXPECTED_INSTANCES = [{ + "kind": "compute#instance", + "id": "3991024138321713624", + "creationTimestamp": "2013-08-14T13:45:32Z", + "zone": + "http://localhost/compute/v1beta15/projects/fake_project/zones/nova", + "status": "RUNNING", + "statusMessage": "ACTIVE", + "name": "i1", + "description": "i1 description", + "machineType": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/machineTypes/m1-small", + "networkInterfaces": [{ + "network": "http://localhost/compute/v1beta15/projects/fake_project" + "/global/networks/private", + "networkIP": "10.0.1.3", + "name": "private", + "accessConfigs": [{ + "kind": "compute#accessConfig", + "type": "ONE_TO_ONE_NAT", + "name": "ip for i1", + "natIP": "192.168.138.196" + }] + }], + "disks": [{ + "kind": "compute#attachedDisk", + "index": 0, + "type": "PERSISTENT", + "mode": "READ_WRITE", + "source": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/disks/i1", + "deviceName": "christmas-tree", + "boot": True, + }], + "metadata": { + "kind": "compute#metadata", + "items": [], + }, + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/instances/i1" +}, { + "kind": "compute#instance", + "id": "3991024138321713621", + "creationTimestamp": "2013-08-14T13:46:36Z", + "zone": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova", + "status": "STOPPED", + "statusMessage": "SUSPENDED", + "name": "i2", + "description": "i2 description", + "machineType": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/machineTypes/m1-large", + "networkInterfaces": [{ + "network": "http://localhost/compute/v1beta15/projects/fake_project" + "/global/networks/default", + "networkIP": "10.100.0.3", + "name": "default", + "accessConfigs": [] + }], + "disks": [], + "metadata": { + "kind": "compute#metadata", + "items": [], + }, + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/instances/i2" +}] + + +class InstancesTest(common.GCEControllerTest): + + def setUp(self): + super(InstancesTest, self).setUp() + + def test_get_instance_by_invalid_name(self): + response = self.request_gce('/fake_project/zones/nova/instances/fake') + self.assertEqual(404, response.status_int) + + def test_get_instance_by_name(self): + response = self.request_gce('/fake_project/zones/nova/instances/i1') + + self.assertEqual(200, response.status_int) + self.assertDictEqual(response.json_body, EXPECTED_INSTANCES[0]) + + def test_get_instance_list_filtered(self): + response = self.request_gce("/fake_project/zones/nova/instances" + "?filter=name+eq+i1") + expected = { + "kind": "compute#instanceList", + "id": "projects/fake_project/zones/nova/instances", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/zones/nova/instances", + } + + response_body = copy.deepcopy(response.json_body) + instances = response_body.pop("items") + self.assertDictEqual(response_body, expected) + self.assertEqual(len(instances), 1) + self.assertDictEqual(instances[0], EXPECTED_INSTANCES[0]) + + def test_get_instance_list(self): + response = self.request_gce('/fake_project/zones/nova/instances') + expected = { + "kind": "compute#instanceList", + "id": "projects/fake_project/zones/nova/instances", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/zones/nova/instances", + } + + response_body = copy.deepcopy(response.json_body) + instances = response_body.pop("items") + 
self.assertDictEqual(response_body, expected) + self.assertDictEqual(instances[0], EXPECTED_INSTANCES[0]) + self.assertDictEqual(instances[1], EXPECTED_INSTANCES[1]) + + def test_get_instance_aggregated_list_filtered(self): + response = self.request_gce("/fake_project/aggregated/instances" + "?filter=name+eq+i2") + + expected = { + "kind": "compute#instanceAggregatedList", + "id": "projects/fake_project/aggregated/instances", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/aggregated/instances", + "items": { + "zones/nova": {}, + } + } + + response_body = copy.deepcopy(response.json_body) + instances = response_body["items"]["zones/nova"].pop("instances") + self.assertDictEqual(response_body, expected) + self.assertEqual(len(instances), 1) + self.assertDictEqual(instances[0], EXPECTED_INSTANCES[1]) + + def test_get_instance_aggregated_list(self): + response = self.request_gce('/fake_project/aggregated/instances') + + expected = { + "kind": "compute#instanceAggregatedList", + "id": "projects/fake_project/aggregated/instances", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/aggregated/instances", + "items": { + "zones/nova": {}, + } + } + + response_body = copy.deepcopy(response.json_body) + instances = response_body["items"]["zones/nova"].pop("instances") + self.assertDictEqual(response_body, expected) + self.assertDictInListBySelfLink(EXPECTED_INSTANCES[0], instances) + self.assertDictInListBySelfLink(EXPECTED_INSTANCES[1], instances) + + def test_delete_instance_with_invalid_name(self): + response = self.request_gce("/fake_project/zones/nova" + "/instances/fake-instance", method="DELETE") + self.assertEqual(404, response.status_int) + + def test_delete_instance(self): + response = self.request_gce( + "/fake_project/zones/nova/instances/i2", + method="DELETE") + expected = { + "targetId": "3991024138321713621", + "operationType": "delete", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i2", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_reset_instance(self): + response = self.request_gce( + "/fake_project/zones/nova/instances/i1/reset", + method="POST") + expected = { + "operationType": "reset", + "targetId": "3991024138321713624", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i1", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_create_instance(self): + request_body = { + "name": "i3", + "description": "inst01descr", + "machineType": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/m1-small", + "disks": [{ + "kind": "compute#attachedDisk", + "boot": True, + "type": "PERSISTENT", + "mode": "READ_WRITE", + "deviceName": "vda", + "zone": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova", + "source": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-1" + }], + "networkInterfaces": [{ + "kind": "compute#instanceNetworkInterface", + "network": ("http://localhost/compute/v1beta15/projects" + "/admin/fake_project/global/private"), + }], + } + response = self.request_gce("/fake_project/zones/nova/instances", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + 
"operationType": "insert", + "targetId": "3991024138321713622", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i3", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertDictEqual(expected, response.json_body) + + def test_add_access_config(self): + request_body = { + "name": "ip for i2", + "type": "ONE_TO_ONE_NAT", + "natIP": "192.168.138.195" + } + response = self.request_gce("/fake_project/zones/nova" + "/instances/i2/addAccessConfig?networkInterface=default", + method="POST", + body=request_body) + expected = { + "operationType": "addAccessConfig", + "targetId": "3991024138321713621", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i2", + } + expected.update(common.COMMON_ZONE_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_delete_access_config(self): + response = self.request_gce("/fake_project/zones/nova/" + "instances/i1/deleteAccessConfig" + "?accessConfig=ip for i1" + "&networkInterface=private", + method="POST") + expected = { + "operationType": "deleteAccessConfig", + "targetId": "3991024138321713624", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i1", + } + expected.update(common.COMMON_ZONE_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_attach_disk(self): + request_body = { + "deviceName": "ghost", + "source": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova/disks/i1" + } + response = self.request_gce("/fake_project/zones/nova" + "/instances/i2/attachDisk", + method="POST", + body=request_body) + expected = { + "operationType": "attachDisk", + "targetId": "3991024138321713621", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i2", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_detach_disk(self): + response = self.request_gce("/fake_project/zones/nova/" + "instances/i1/detachDisk?deviceName=christmas-tree", + method="POST") + expected = { + "operationType": "detachDisk", + "targetId": "3991024138321713624", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/instances/i1", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) diff --git a/gceapi/tests/api/test_machine_types.py b/gceapi/tests/api/test_machine_types.py new file mode 100644 index 0000000..6c475c7 --- /dev/null +++ b/gceapi/tests/api/test_machine_types.py @@ -0,0 +1,148 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import copy
+
+from gceapi.api import machine_types
+from gceapi.tests.api import common
+
+
+EXPECTED_FLAVORS = [{
+    "kind": "compute#machineType",
+    "id": "7739288395178120473",
+    "description": "",
+    "name": "m1-small",
+    "guestCpus": 1,
+    "memoryMb": 2048,
+    "imageSpaceGb": 20,
+    "maximumPersistentDisks": 0,
+    "maximumPersistentDisksSizeGb": 0,
+    "zone": "http://localhost/compute/v1beta15/projects/fake_project"
+        "/zones/nova",
+    "selfLink": "http://localhost/compute/v1beta15/projects/fake_project"
+        "/zones/nova/machineTypes/m1-small"
+}, {
+    "kind": "compute#machineType",
+    "id": "6065497922195565467",
+    "description": "",
+    "name": "m1-large",
+    "scratchDisks": [{"diskGb": 870L}],
+    "guestCpus": 4,
+    "memoryMb": 8192,
+    "imageSpaceGb": 80,
+    "maximumPersistentDisks": 0,
+    "maximumPersistentDisksSizeGb": 0,
+    "zone": "http://localhost/compute/v1beta15/projects/fake_project"
+        "/zones/nova",
+    "selfLink": "http://localhost/compute/v1beta15/projects/fake_project"
+        "/zones/nova/machineTypes/m1-large"
+}]
+
+
+class MachineTypesTest(common.GCEControllerTest):
+    def setUp(self):
+        super(MachineTypesTest, self).setUp()
+        self.controller = machine_types.Controller()
+
+    def test_get_machine_type_by_invalid_name(self):
+        response = self.request_gce(
+            '/fake_project/zones/nova/machineTypes/wrongMachineType')
+        self.assertEqual(404, response.status_int)
+
+    def test_get_flavor_by_name(self):
+        response = self.request_gce(
+            '/fake_project/zones/nova/machineTypes/m1-small')
+        expected = EXPECTED_FLAVORS[0]
+
+        self.assertDictEqual(response.json_body, expected)
+
+    def test_get_flavor_list_filtered(self):
+        response = self.request_gce("/fake_project/zones/nova/machineTypes"
+                                    "?filter=name+eq+m1-large")
+        expected = {
+            "kind": "compute#machineTypeList",
+            "id": "projects/fake_project/zones/nova/machineTypes",
+            "selfLink": "http://localhost/compute/v1beta15/projects"
+                "/fake_project/zones/nova/machineTypes",
+            "items": [EXPECTED_FLAVORS[1]]
+        }
+
+        self.assertEqual(response.json_body, expected)
+
+    def test_get_flavor_list_paged(self):
+        response = self.request_gce("/fake_project/zones/nova/machineTypes"
+                                    "?maxResults=1")
+        expected = {
+            "kind": "compute#machineTypeList",
+            "id": "projects/fake_project/zones/nova/machineTypes",
+            "selfLink": "http://localhost/compute/v1beta15/projects"
+                "/fake_project/zones/nova/machineTypes",
+            "items": [EXPECTED_FLAVORS[1]],
+            "nextPageToken": "1"
+        }
+
+        self.assertDictEqual(response.json_body, expected)
+
+        response = self.request_gce("/fake_project/zones/nova/machineTypes"
+                                    "?maxResults=1&pageToken=1")
+        expected = {
+            "kind": "compute#machineTypeList",
+            "id": "projects/fake_project/zones/nova/machineTypes",
+            "selfLink": "http://localhost/compute/v1beta15/projects"
+                "/fake_project/zones/nova/machineTypes",
+            "items": [EXPECTED_FLAVORS[0]]
+        }
+
+        self.assertDictEqual(response.json_body, expected)
+
+    def test_get_flavor_list(self):
+        response = self.request_gce('/fake_project/zones/nova/machineTypes')
+        expected = {
+            "kind": "compute#machineTypeList",
+            "id": "projects/fake_project/zones/nova/machineTypes",
+            "selfLink": "http://localhost/compute/v1beta15/projects"
+                "/fake_project/zones/nova/machineTypes",
+            "items": EXPECTED_FLAVORS
+        }
+
+        self.assertEqual(response.json_body, expected)
+
+    def test_get_flavor_aggregated_list(self):
+        response = self.request_gce('/fake_project/aggregated/machineTypes')
+
+        expected_flavors2 = copy.deepcopy(EXPECTED_FLAVORS)
+        for flavor in expected_flavors2:
+            flavor["zone"] = 
flavor["zone"].replace("nova", "unavailable_zone") + flavor["selfLink"] = flavor["selfLink"].replace( + "nova", "unavailable_zone") + # NOTE(apavlov) fix id due to changed selfLink + # (gce_api calculate id from selfLink) + hashed_link = hash(flavor["selfLink"]) + flavor["id"] = hashed_link if hashed_link >= 0 else -hashed_link + flavor["id"] = str(flavor["id"]) + + expected = { + "kind": "compute#machineTypeAggregatedList", + "id": "projects/fake_project/aggregated/machineTypes", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/aggregated/machineTypes", + "items": { + "zones/nova": { + "machineTypes": EXPECTED_FLAVORS + }, + } + } + + self.assertEqual(response.json_body, expected) diff --git a/gceapi/tests/api/test_networks.py b/gceapi/tests/api/test_networks.py new file mode 100644 index 0000000..fa98467 --- /dev/null +++ b/gceapi/tests/api/test_networks.py @@ -0,0 +1,117 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import networks +from gceapi.tests.api import common + + +EXPECTED_NETWORKS = [{ + "kind": "compute#network", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/private", + "name": "private", + "id": "1543653731328164645", + "IPv4Range": "10.0.0.0/24", + "gatewayIPv4": "10.0.0.1", + "creationTimestamp": "2013-12-25T09:05:07.396957Z", + "description": "main network", +}, { + "kind": "compute#network", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/public", + "name": "public", + "id": "8340158205161619676", + "IPv4Range": "172.24.4.224/28", + "gatewayIPv4": "172.24.4.225", + "creationTimestamp": "", +}] + + +class NetworksControllerTest(common.GCEControllerTest): + + def setUp(self): + """Run before each test.""" + super(NetworksControllerTest, self).setUp() + self.controller = networks.Controller() + + def test_get_network_by_invalid_name(self): + response = self.request_gce( + '/fake_project/global/networks/wrongNetworkName') + self.assertEqual(404, response.status_int) + + def test_get_network(self): + response = self.request_gce('/fake_project/global/networks/public') + expected = EXPECTED_NETWORKS[1] + + self.assertEqual(response.json_body, expected) + + def test_get_networks_list_filtered(self): + response = self.request_gce("/fake_project/global/networks" + "?filter=name+eq+public") + expected = { + "kind": "compute#networkList", + "id": "projects/fake_project/global/networks", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks", + "items": [EXPECTED_NETWORKS[1]] + } + + self.assertEqual(response.json_body, expected) + + def test_get_networks_list(self): + response = self.request_gce('/fake_project/global/networks') + expected = { + "kind": "compute#networkList", + "id": "projects/fake_project/global/networks", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks", + "items": EXPECTED_NETWORKS + } + + 
self.assertEqual(response.json_body, expected) + + def test_create_network(self): + request_body = { + "IPv4Range": "10.100.0.0/24", + "kind": "compute#network", + "gatewayIPv4": "10.100.0.1", + "name": "mynet", + "description": "" + } + response = self.request_gce('/fake_project/global/networks', + method="POST", + body=request_body) + + expected = { + "operationType": "insert", + "targetId": "7132179741530156151", + "targetLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/networks/mynet", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertEqual(response.json_body, expected) + + def test_delete_network(self): + response = self.request_gce( + '/fake_project/global/networks/public', method='DELETE') + expected = { + "operationType": "delete", + "targetId": "8340158205161619676", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/networks/public", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertEqual(expected, response.json_body) diff --git a/gceapi/tests/api/test_operations.py b/gceapi/tests/api/test_operations.py new file mode 100644 index 0000000..6ee54d3 --- /dev/null +++ b/gceapi/tests/api/test_operations.py @@ -0,0 +1,323 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
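Every mutating call in these tests answers with a compute#operation resource like the fixtures below, and a GCE client is expected to poll it until status reaches DONE. A minimal client-side sketch of that loop (hypothetical consumer code, not part of the service):

import json
import time
import urllib2

def wait_for_operation(self_link, interval=1):
    # Poll the operation's selfLink until the API reports completion.
    while True:
        operation = json.loads(urllib2.urlopen(self_link).read())
        if operation["status"] == "DONE":
            return operation
        time.sleep(interval)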
+ +from gceapi.api import operations +from gceapi.tests.api import common + +FAKE_ADD_INSTANCE = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'add', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'3991024138321713624', + u'name': u'operation-47be73d8-b8fe-4148-9e3b-3f323136ee57', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/instances/i1'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'2720525776854968247', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-47be73d8-b8fe-4148-9e3b-3f323136ee57'), + u'user': u'admin' +} +FAKE_DELETE_INSTANCE = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'delete', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'6879239049877988420', + u'name': u'operation-fbd91157-91e9-4121-af74-090260aa38cc', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/instances/i-deleted'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'5384375190177147022', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-fbd91157-91e9-4121-af74-090260aa38cc'), + u'user': u'admin' +} +FAKE_RESET_INSTANCE = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'reset', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'3991024138321713621', + u'name': u'operation-6fc4e7e2-c0c8-4f97-bf1d-f6f958eb17b7', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/instances/i2'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'1756014432056394800', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-6fc4e7e2-c0c8-4f97-bf1d-f6f958eb17b7'), + u'user': u'admin' +} +FAKE_ADD_DISK = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'add', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'9202387718698825408', + u'name': u'operation-05e2a2b2-9708-4386-97cc-2318df3357b6', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/disks/fake-disk-1'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'5828976712396009927', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-05e2a2b2-9708-4386-97cc-2318df3357b6'), + u'user': u'admin' +} +FAKE_DELETE_DISK = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'delete', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'3806967300998164012', + u'name': 
u'operation-1cfd73fa-9b79-43ef-bbc7-c44bc514ba2e', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/disks/fake-deleted-disk'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'1352585941258466199', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-1cfd73fa-9b79-43ef-bbc7-c44bc514ba2e'), + u'user': u'admin' +} +FAKE_CREATE_SNAPSHOT = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'operationType': u'createSnapshot', + u'zone': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova'), + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'9202387718698825406', + u'name': u'operation-3f6f1326-3e7c-4076-be6b-939147d031ae', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/disks/fake-disk-3'), + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'8142453451801876697', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/zones/nova/operations/' + 'operation-3f6f1326-3e7c-4076-be6b-939147d031ae'), + u'user': u'admin' +} +FAKE_DELETE_SNAPSHOT = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'4046627746386228297', + u'name': u'operation-e72badca-0273-4a69-9303-181df05e602c', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/snapshots/fake-deleted-snapshot'), + u'operationType': u'delete', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'3651183053589617825', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/operations/' + 'operation-e72badca-0273-4a69-9303-181df05e602c'), + u'user': u'admin' +} +FAKE_ADD_IMAGE = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'5721131091780319465', + u'name': u'operation-9417e8bd-e8cc-47a1-86e8-c4c24c043b3d', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/images/fake-image-1'), + u'operationType': u'add', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'939083621940800216', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/operations/' + 'operation-9417e8bd-e8cc-47a1-86e8-c4c24c043b3d'), + u'user': u'admin' +} +FAKE_DELETE_IMAGE = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'startTime': u'2014-01-20T11:17:39.935278Z', + u'targetId': u'5396967400190520435', + u'name': u'operation-0aad68c4-ee6b-45da-af7e-9e696a885168', + u'targetLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/images/fake-deleted-image'), + u'operationType': u'delete', + u'progress': 100, + u'endTime': u'2013-12-27T08:46:34.684354Z', + u'id': u'984725436897145210', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/operations/' + 'operation-0aad68c4-ee6b-45da-af7e-9e696a885168'), + u'user': u'admin' +} +FAKE_SET_METADATA = { + u'status': u'DONE', + u'kind': u'compute#operation', + u'insertTime': u'2014-01-20T11:17:39.735738Z', + u'startTime': 
u'2014-01-20T11:17:39.935278Z', + u'targetId': u'504224095749693425', + u'name': u'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac', + u'targetLink': u'http://localhost/compute/v1beta15/projects/fake_project', + u'operationType': u'setMetadata', + u'progress': 100, + u'endTime': u'2014-01-20T11:17:43.378890Z', + u'id': u'6371605128170593585', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/operations/' + 'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac'), + u'user': u'admin' +} + + +class OperationsControllerTest(common.GCEControllerTest): + + def setUp(self): + """Run before each test.""" + super(OperationsControllerTest, self).setUp() + self.controller = operations.Controller() + + def test_aggregated_list_combined_with_update_progress(self): + response = self.request_gce('/fake_project/aggregated/operations') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#operationAggregatedList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/aggregated/operations", + "id": "projects/fake_project/aggregated/operations", + } + operation_dict = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertIn("global", operation_dict) + self.assertIn("operations", operation_dict["global"]) + operations = operation_dict["global"].pop("operations") + self.assertItemsEqual([FAKE_DELETE_SNAPSHOT, + FAKE_ADD_IMAGE, FAKE_DELETE_IMAGE, + FAKE_SET_METADATA], operations) + self.assertEqual(0, len(operation_dict["global"])) + operation_dict.pop("global") + self.assertIn("zones/nova", operation_dict) + self.assertIn("operations", operation_dict["zones/nova"]) + operations = operation_dict["zones/nova"].pop("operations") + self.assertItemsEqual([FAKE_ADD_INSTANCE, FAKE_DELETE_INSTANCE, + FAKE_RESET_INSTANCE, + FAKE_ADD_DISK, FAKE_DELETE_DISK, + FAKE_CREATE_SNAPSHOT], operations) + self.assertEqual(0, len(operation_dict["zones/nova"])) + operation_dict.pop("zones/nova") + self.assertEqual(0, len(operation_dict)) + + def test_list_zone_operations(self): + response = self.request_gce('/fake_project/zones/nova/operations') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#operationList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/operations", + "id": "projects/fake_project/zones/nova/operations", + } + operations = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertItemsEqual([FAKE_ADD_INSTANCE, FAKE_DELETE_INSTANCE, + FAKE_RESET_INSTANCE, + FAKE_ADD_DISK, FAKE_DELETE_DISK, + FAKE_CREATE_SNAPSHOT], operations) + + def test_list_global_operations(self): + response = self.request_gce('/fake_project/global/operations') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#operationList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/operations", + "id": "projects/fake_project/global/operations", + } + operations = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertItemsEqual([FAKE_DELETE_SNAPSHOT, + FAKE_ADD_IMAGE, FAKE_DELETE_IMAGE, + FAKE_SET_METADATA], operations) + + def test_get_global_operation(self): + response = 
self.request_gce( + '/fake_project/global/operations/' + 'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac') + self.assertEqual(200, response.status_int) + self.assertEqual(FAKE_SET_METADATA, response.json_body) + + def test_get_zone_operation(self): + response = self.request_gce( + '/fake_project/zones/nova/operations/' + 'operation-05e2a2b2-9708-4386-97cc-2318df3357b6') + self.assertEqual(200, response.status_int) + self.assertEqual(FAKE_ADD_DISK, response.json_body) + + def test_get_global_operation_from_zone(self): + response = self.request_gce( + '/fake_project/zones/nova/operations/' + 'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac') + self.assertEqual(404, response.status_int) + + def test_get_zone_operation_from_global(self): + response = self.request_gce( + '/fake_project/global/operations/' + 'operation-05e2a2b2-9708-4386-97cc-2318df3357b6') + self.assertEqual(404, response.status_int) + + def test_delete_operation(self): + response = self.request_gce( + '/fake_project/global/operations/' + 'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac', + method="DELETE") + self.assertEqual(204, response.status_int) + + def test_delete_operation_from_other_scope(self): + response = self.request_gce( + '/fake_project/zones/nova/operations/' + 'operation-a7b6bb82-d51f-4f04-a07c-bd9241bc2aac', + method="DELETE") + self.assertEqual(204, response.status_int) diff --git a/gceapi/tests/api/test_projects.py b/gceapi/tests/api/test_projects.py new file mode 100644 index 0000000..6a9de0d --- /dev/null +++ b/gceapi/tests/api/test_projects.py @@ -0,0 +1,92 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
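+ +# NOTE: EXPECTED_PROJECT below shows the GCE project resource the projects +# controller is expected to assemble: the name comes from the OpenStack +# project, and each "quotas" entry presumably maps a quota from the fake +# Nova/Cinder/Neutron clients onto a GCE metric, with "limit" taken from the +# quota and "usage" from the in-use count.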
+ +from gceapi.api import projects +from gceapi.tests.api import common + + +EXPECTED_PROJECT = { + "kind": "compute#project", + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project", + "id": "504224095749693425", + "name": "fake_project", + "description": "", + "commonInstanceMetadata": { + "kind": "compute#metadata" + }, + "quotas": [{ + "metric": "CPU", + "limit": 17.0, + "usage": 1.0 + }, + { + "metric": "INSTANCES", + "limit": 10.0, + "usage": 4.0 + }, + { + "usage": 2.0, + "metric": "DISKS_TOTAL_GB", + "limit": 1000.0 + }, + { + "usage": 1.0, + "metric": "SNAPSHOTS", + "limit": 10.0 + }, + { + "usage": 1.0, + "metric": "DISKS", + "limit": 10.0 + }, + { + "usage": 2.0, + "metric": "FIREWALLS", + "limit": 10.0 + }, + { + "usage": 1.0, + "metric": "STATIC_ADDRESSES", + "limit": 50.0 + }, + { + "usage": 2.0, + "metric": "NETWORKS", + "limit": 10.0 + } +]} + + +class ProjectsTest(common.GCEControllerTest): + def setUp(self): + super(ProjectsTest, self).setUp() + self.controller = projects.Controller() + + def test_get_project(self): + response = self.request_gce("/fake_project") + self.assertDictEqual(response.json_body, EXPECTED_PROJECT) + + def test_set_common_instance_metadata(self): + expected = { + "operationType": "setMetadata", + "targetId": "504224095749693425", + "targetLink": "http://localhost/compute/v1beta15/projects" + "/fake_project", + } + expected.update(common.COMMON_FINISHED_OPERATION) + body = {"items": [], "kind": "compute#metadata"} + response = self.request_gce("/fake_project/setCommonInstanceMetadata", + method="POST", + body=body) + self.assertDictEqual(response.json_body, expected) diff --git a/gceapi/tests/api/test_regions.py b/gceapi/tests/api/test_regions.py new file mode 100644 index 0000000..b01955b --- /dev/null +++ b/gceapi/tests/api/test_regions.py @@ -0,0 +1,77 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import regions +from gceapi.tests.api import common + + +EXPECTED_REGIONS = [ + { + "id": "6643843765891209621", + "kind": "compute#region", + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project" + "/regions/nova", + "name": "nova", + "status": "UP", + "zones": [ + "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova"] + }, +] + + +class RegionsControllerTest(common.GCEControllerTest): + """ + Test of the GCE API /regions application.
+ """ + + def setUp(self): + """Run before each test.""" + super(RegionsControllerTest, self).setUp() + self.controller = regions.Controller() + + def test_get_region_by_invalid_name(self): + response = self.request_gce('/fake_project/regions/fakeregion') + self.assertEqual(404, response.status_int) + + def test_get_region(self): + response = self.request_gce('/fake_project/regions/nova') + expected = EXPECTED_REGIONS[0] + + self.assertEqual(response.json_body, expected) + + def test_get_region_list_filtered(self): + response = self.request_gce("/fake_project/regions" + "?filter=name+eq+nova") + expected = { + "kind": "compute#regionList", + "id": "projects/fake_project/regions", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/regions", + "items": [EXPECTED_REGIONS[0]] + } + + self.assertEqual(response.json_body, expected) + + def test_get_region_list(self): + response = self.request_gce('/fake_project/regions') + expected = { + "kind": "compute#regionList", + "id": "projects/fake_project/regions", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/regions", + "items": EXPECTED_REGIONS + } + + self.assertEqual(response.json_body, expected) diff --git a/gceapi/tests/api/test_routes.py b/gceapi/tests/api/test_routes.py new file mode 100644 index 0000000..97e9702 --- /dev/null +++ b/gceapi/tests/api/test_routes.py @@ -0,0 +1,255 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from gceapi.tests.api import common + +FAKE_LOCAL_ROUTE_1 = { + u'priority': 1000, + u'kind': u'compute#route', + u'description': u'Default route to the virtual network.', + u'name': u'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-local', + u'nextHopNetwork': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/private'), + u'destRange': u'10.0.0.0/24', + u'id': u'6109690470355354668', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/routes/' + 'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-local'), + u'network': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/private'), + u'creationTimestamp': u'', +} +FAKE_INTERNET_ROUTE_1 = { + u'nextHopGateway': ( + u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/gateways/default-internet-gateway'), + u'kind': u'compute#route', + u'description': u'Default route to the Internet.', + u'name': u'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-internet', + u'priority': 1000, + u'destRange': u'0.0.0.0/0', + u'id': u'6686112297298011631', + u'selfLink': ( + u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/routes/' + 'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-internet'), + u'network': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/private'), + u'creationTimestamp': u'', +} +FAKE_CUSTOM_ROUTE_1 = { + u'kind': u'compute#route', + u'name': u'custom-route-1', + u'description': u'route for 32.44.64.0/24', + u'priority': 1000, + u'nextHopIp': u'10.0.0.32', + u'destRange': u'32.44.64.0/24', + u'id': u'8814469654458772789', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/routes/custom-route-1'), + u'network': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/private'), + u'creationTimestamp': u'2013-12-25T09:05:07.396957Z', +} +FAKE_CUSTOM_ROUTE_2 = { + u'kind': u'compute#route', + u'name': (u'custom-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-' + 'dst-89-34-0-0-16-gw-10-0-0-78'), + u'priority': 1000, + u'nextHopIp': u'10.0.0.78', + u'destRange': u'89.34.0.0/16', + u'id': u'4048181833789971692', + u'selfLink': ( + u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/routes/custom-route-' + '734b9c83-3a8b-4350-8fbf-d40f571ee163-dst-89-34-0-0-16-gw-10-0-0-78'), + u'network': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/private'), + u'creationTimestamp': u'', +} +FAKE_LOCAL_ROUTE_2 = { + u'priority': 1000, + u'kind': u'compute#route', + u'description': u'Default route to the virtual network.', + u'name': u'default-route-7aa33661-33ba-4291-a2c7-44bfd59884c1-local', + u'nextHopNetwork': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/public'), + u'destRange': u'172.24.4.224/28', + u'id': u'2822661357924528032', + u'selfLink': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/routes/' + 'default-route-7aa33661-33ba-4291-a2c7-44bfd59884c1-local'), + u'network': (u'http://localhost/compute/v1beta15/projects/' + 'fake_project/networks/public'), + u'creationTimestamp': u'', +} + + +class RoutesControllerTest(common.GCEControllerTest): + """ + Test of the GCE API /routes application controller w/Neutron. 
+ """ + + def test_list_routes(self): + response = self.request_gce('/fake_project/global/routes') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertIn("items", response_body) + expected_common = { + "kind": "compute#routeList", + "selfLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/routes", + "id": "projects/fake_project/global/routes", + } + response_routes = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertItemsEqual( + [FAKE_LOCAL_ROUTE_1, FAKE_INTERNET_ROUTE_1, + FAKE_CUSTOM_ROUTE_1, FAKE_CUSTOM_ROUTE_2, FAKE_LOCAL_ROUTE_2], + response_routes) + + def test_get_route(self): + response = self.request_gce('/fake_project/global/routes/' + 'custom-route-1') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertDictEqual(FAKE_CUSTOM_ROUTE_1, response_body) + + def test_get_local_route(self): + response = self.request_gce( + '/fake_project/global/routes/' + 'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-local') + self.assertEqual(200, response.status_int) + response_body = response.json_body + self.assertDictEqual(FAKE_LOCAL_ROUTE_1, response_body) + + def test_get_nonexistent_route(self): + response = self.request_gce( + '/fake_project/global/routes/' + 'nonexistent_route') + self.assertEqual(404, response.status_int) + + def test_add_internet_route(self): + request_body = { + 'destRange': '0.0.0.0/0', + 'name': 'custom-internet-route', + 'network': 'private', + 'nextHopGateway': ( + 'http://localhost/compute/v1beta15/projects/' + 'fake_project/global/gateways/default-internet-gateway'), + 'priority': 1000, + } + response = self.request_gce('/fake_project/global/routes', + method="POST", + body=request_body) + expected = { + "operationType": "insert", + "targetId": "3171351404482340798", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/routes/custom-internet-route", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_add_custom_route(self): + request_body = { + 'destRange': '40.81.234.0/24', + 'name': 'custom-route', + 'network': 'private', + 'nextHopIp': '10.0.0.107', + 'priority': 1000, + } + response = self.request_gce('/fake_project/global/routes', + method="POST", + body=request_body) + expected = { + "operationType": "insert", + "targetId": "7622192026776022193", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/routes/custom-route", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_add_duplicate_route(self): + request_body = { + 'destRange': '40.81.234.0/24', + 'name': 'custom-route-1', + 'network': 'private', + 'nextHopIp': '10.0.0.107', + 'priority': 1000, + } + response = self.request_gce('/fake_project/global/routes', + method="POST", + body=request_body) + self.assertEqual(400, response.status_int) + + def test_add_unsupported_route(self): + request_body = { + 'destRange': '40.81.234.0/24', + 'name': 'instance-route', + 'network': 'private', + 'nextHopInstance': 'instance', + 'priority': 1000, + } + response = self.request_gce('/fake_project/global/routes', + method="POST", + body=request_body) + self.assertEqual(400, response.status_int) + + def test_delete_local_route(self): + response = self.request_gce( + 
'/fake_project/global/routes/' + 'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-local', + method="DELETE") + self.assertEqual(400, response.status_int) + + def test_delete_internet_route(self): + response = self.request_gce( + '/fake_project/global/routes/' + 'default-route-734b9c83-3a8b-4350-8fbf-d40f571ee163-internet', + method="DELETE") + expected = { + "operationType": "delete", + "targetId": "6686112297298011631", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/routes/default-route-" + "734b9c83-3a8b-4350-8fbf-d40f571ee163-internet", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_delete_custom_route(self): + response = self.request_gce( + '/fake_project/global/routes/custom-route-1', + method="DELETE") + expected = { + "operationType": "delete", + "targetId": "8814469654458772789", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/routes/custom-route-1", + } + expected.update(common.COMMON_FINISHED_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_delete_nonexistent_route(self): + response = self.request_gce( + '/fake_project/global/routes/' + 'nonexistent-route', + method="DELETE") + self.assertEqual(404, response.status_int) diff --git a/gceapi/tests/api/test_snapshots.py b/gceapi/tests/api/test_snapshots.py new file mode 100644 index 0000000..0b56c60 --- /dev/null +++ b/gceapi/tests/api/test_snapshots.py @@ -0,0 +1,117 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
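+ +# NOTE: the snapshot create/delete tests below assert against the *pending* +# operation templates from gceapi.tests.api.common rather than the finished +# ones, presumably because the backing Cinder snapshot calls complete +# asynchronously.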
+ +import copy + +from gceapi.api import snapshots +from gceapi.tests.api import common + +EXPECTED_SNAPSHOTS = [{ + "kind": "compute#snapshot", + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project/" + "global/snapshots/fake-snapshot", + "id": "8386122516930476063", + "creationTimestamp": "2013-08-14T12:32:28Z", + "status": "READY", + "diskSizeGb": 2, + "sourceDisk": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-3", + "name": "fake-snapshot", + "description": "full description of snapshot 1", + "sourceDiskId": "9202387718698825406" +}] + + +class SnapshotsTest(common.GCEControllerTest): + + def setUp(self): + super(SnapshotsTest, self).setUp() + self.controller = snapshots.Controller() + + def test_get_snapshot_by_invalid_name(self): + response = self.request_gce("/fake_project/global/snapshots/fake") + self.assertEqual(404, response.status_int) + + def test_get_snapshot_by_name(self): + response = self.request_gce("/fake_project/global/snapshots" + "/fake-snapshot") + + self.assertEqual(200, response.status_int) + self.assertDictEqual(response.json_body, EXPECTED_SNAPSHOTS[0]) + + def test_get_snapshot_list_filtered(self): + response = self.request_gce("/fake_project/global/snapshots" + "?filter=name+eq+fake-snapshot") + expected = { + "kind": "compute#snapshotList", + "id": "projects/fake_project/global/snapshots", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/snapshots", + "items": [EXPECTED_SNAPSHOTS[0]] + } + + self.assertEqual(response.json_body, expected) + + def test_get_snapshot_list(self): + response = self.request_gce("/fake_project/global/snapshots") + expected_common = { + "kind": "compute#snapshotList", + "id": "projects/fake_project/global/snapshots", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/global/snapshots", + } + + response_body = copy.deepcopy(response.json_body) + self.assertIn("items", response_body) + response_items = response_body.pop("items") + self.assertDictEqual(expected_common, response_body) + self.assertDictEqual(EXPECTED_SNAPSHOTS[0], response_items[0]) + + def test_delete_snapshot_with_invalid_name(self): + response = self.request_gce("/fake_project/global" + "/snapshots/fake", method="DELETE") + self.assertEqual(404, response.status_int) + + def test_delete_snapshot(self): + response = self.request_gce( + "/fake_project/global/snapshots/fake-snapshot", + method="DELETE") + expected = { + "operationType": "delete", + "targetId": "8386122516930476063", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/global/snapshots/fake-snapshot", + } + expected.update(common.COMMON_PENDING_OPERATION) + self.assertEqual(200, response.status_int) + self.assertDictEqual(expected, response.json_body) + + def test_create_snapshot(self): + request_body = { + "name": "fake-new-snapshot", + "description": "fake description" + } + response = self.request_gce( + "/fake_project/zones/nova/disks/fake-disk-3/createSnapshot", + method="POST", + body=request_body) + self.assertEqual(200, response.status_int) + expected = { + "operationType": "createSnapshot", + "targetId": "9202387718698825406", + "targetLink": "http://localhost/compute/v1beta15/projects/" + "fake_project/zones/nova/disks/fake-disk-3", + } + expected.update(common.COMMON_ZONE_PENDING_OPERATION) + self.assertDictEqual(expected, response.json_body) diff --git a/gceapi/tests/api/test_zones.py b/gceapi/tests/api/test_zones.py new file mode 100644 index 
0000000..6321d3a --- /dev/null +++ b/gceapi/tests/api/test_zones.py @@ -0,0 +1,72 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from gceapi.api import zones +from gceapi.tests.api import common + + +EXPECTED_ZONES = [{ + "id": "3924463100986466035", + "kind": "compute#zone", + "selfLink": "http://localhost/compute/v1beta15/projects/fake_project" + "/zones/nova", + "name": "nova", + "status": "UP", + "region": "nova", +}] + + +class ZonesControllerTest(common.GCEControllerTest): + """ + Test of the GCE API /zones application. + """ + + def setUp(self): + """Run before each test.""" + super(ZonesControllerTest, self).setUp() + self.controller = zones.Controller() + + def test_get_zone_by_invalid_name(self): + response = self.request_gce('/fake_project/zones/fakezone') + self.assertEqual(404, response.status_int) + + def test_get_zone(self): + response = self.request_gce('/fake_project/zones/nova') + expected = EXPECTED_ZONES[0] + + self.assertEqual(response.json_body, expected) + + def test_get_zone_list_filtered(self): + response = self.request_gce('/fake_project/zones?filter=name+eq+nova') + expected = { + "kind": "compute#zoneList", + "id": "projects/fake_project/zones", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/zones", + "items": [EXPECTED_ZONES[0]] + } + + self.assertEqual(response.json_body, expected) + + def test_get_zone_list(self): + response = self.request_gce('/fake_project/zones') + expected = { + "kind": "compute#zoneList", + "id": "projects/fake_project/zones", + "selfLink": "http://localhost/compute/v1beta15/projects" + "/fake_project/zones", + "items": EXPECTED_ZONES + } + + self.assertEqual(response.json_body, expected) diff --git a/gceapi/tests/api/utils.py b/gceapi/tests/api/utils.py new file mode 100644 index 0000000..383fd1b --- /dev/null +++ b/gceapi/tests/api/utils.py @@ -0,0 +1,26 @@ +# Copyright 2013 Cloudscaling Group, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
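+ +# Illustrative usage (hypothetical values): FakeObject lets the fake clients +# wrap plain dicts so tests can use attribute access as with real client +# objects, and get_id tolerates both objects and bare ids: +# +# server = FakeObject({"id": "i1", "status": "ACTIVE"}) +# server.status # -> "ACTIVE" +# get_id(server) # -> "i1" +# get_id("i1") # -> "i1" (values without an .id pass through)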
+ + +class FakeObject(object): + def __init__(self, obj_dict): + for a, b in obj_dict.items(): + setattr(self, a, b) + + +def get_id(obj): + try: + return obj.id + except AttributeError: + return obj diff --git a/gceapi/version.py b/gceapi/version.py new file mode 100644 index 0000000..77852e1 --- /dev/null +++ b/gceapi/version.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pbr.version + +GCEAPI_VENDOR = "OpenStack Foundation" +GCEAPI_PRODUCT = "OpenStack Gceapi" +GCEAPI_PACKAGE = None # OS distro package version suffix + +loaded = False +version_info = pbr.version.VersionInfo('gceapi') +version_string = version_info.version_string + + +def _load_config(): + # Don't load in global context, since we can't assume + # these modules are accessible when distutils uses + # this module + import ConfigParser + + from oslo.config import cfg + + from gceapi.openstack.common import log as logging + + global loaded, GCEAPI_VENDOR, GCEAPI_PRODUCT, GCEAPI_PACKAGE + if loaded: + return + + loaded = True + + cfgfile = cfg.CONF.find_file("release") + if cfgfile is None: + return + + try: + parser = ConfigParser.RawConfigParser() + parser.read(cfgfile) + + if parser.has_option("Gceapi", "vendor"): + GCEAPI_VENDOR = parser.get("Gceapi", "vendor") + + if parser.has_option("Gceapi", "product"): + GCEAPI_PRODUCT = parser.get("Gceapi", "product") + + if parser.has_option("Gceapi", "package"): + GCEAPI_PACKAGE = parser.get("Gceapi", "package") + except Exception as ex: + LOG = logging.getLogger(__name__) + LOG.error("Failed to load %(cfgfile)s: %(ex)s", + {'cfgfile': cfgfile, 'ex': ex}) + + +def vendor_string(): + _load_config() + + return GCEAPI_VENDOR + + +def product_string(): + _load_config() + + return GCEAPI_PRODUCT + + +def package_string(): + _load_config() + + return GCEAPI_PACKAGE + + +def version_string_with_package(): + if package_string() is None: + return version_info.version_string() + else: + return "%s-%s" % (version_info.version_string(), package_string()) diff --git a/gceapi/wsgi.py b/gceapi/wsgi.py new file mode 100644 index 0000000..5d27795 --- /dev/null +++ b/gceapi/wsgi.py @@ -0,0 +1,485 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utility methods for working with WSGI servers.""" + +from __future__ import print_function + +import os.path +import socket +import ssl +import sys + +import eventlet.wsgi +import greenlet +from oslo.config import cfg +from paste import deploy +import routes.middleware +import webob.dec +import webob.exc + +from gceapi import exception +from gceapi.openstack.common import excutils +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import log as logging + +wsgi_opts = [ + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for gceapi-api'), + cfg.StrOpt('wsgi_log_format', + default='%(client_ip)s "%(request_line)s" status: %(status_code)s' + ' len: %(body_length)s time: %(wall_seconds).7f', + help='A python format string that is used as the template to ' + 'generate log lines. The following values can be formatted ' + 'into it: client_ip, date_time, request_line, status_code, ' + 'body_length, wall_seconds.'), + cfg.StrOpt('ssl_ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + default=None, + help="SSL certificate of API server"), + cfg.StrOpt('ssl_key_file', + default=None, + help="SSL private key of API server"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X.") + ] +CONF = cfg.CONF +CONF.register_opts(wsgi_opts) + +LOG = logging.getLogger(__name__) + + +class Server(object): + """Server class to manage a WSGI server, serving a WSGI application.""" + + default_pool_size = 1000 + + def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, + protocol=eventlet.wsgi.HttpProtocol, backlog=128, + use_ssl=False, max_url_len=None): + """Initialize, but do not start, a WSGI server. + + :param name: Pretty name for logging. + :param app: The WSGI application to serve. + :param host: IP address to serve the application. + :param port: Port number on which to serve the application. + :param pool_size: Maximum number of eventlets to spawn concurrently. + :param backlog: Maximum number of queued connections. + :param max_url_len: Maximum length of permitted URLs. + :returns: None + :raises: gceapi.exception.InvalidInput + """ + self.name = name + self.app = app + self._server = None + self._protocol = protocol + self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) + self._logger = logging.getLogger("gceapi.%s.wsgi.server" % self.name) + self._wsgi_logger = logging.WritableLogger(self._logger) + self._use_ssl = use_ssl + self._max_url_len = max_url_len + + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be at least 1') + + bind_addr = (host, port) + # TODO(dims): eventlet's green dns/socket module does not actually + # support IPv6 in getaddrinfo(). We need to get around this in the
We need to get around this in the + # future or monitor upstream for a fix + try: + info = socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM)[0] + family = info[0] + bind_addr = info[-1] + except Exception: + family = socket.AF_INET + + self._socket = eventlet.listen(bind_addr, family, backlog=backlog) + (self.host, self.port) = self._socket.getsockname()[0:2] + LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__) + + def start(self): + """Start serving a WSGI application. + + :returns: None + """ + if self._use_ssl: + try: + ca_file = CONF.ssl_ca_file + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError( + _("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError( + _("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError( + _("Unable to find key_file : %s") % key_file) + + if self._use_ssl and (not cert_file or not key_file): + raise RuntimeError( + _("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + self._socket = eventlet.wrap_ssl(self._socket, + **ssl_kwargs) + + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + self._socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_("Failed to start %(name)s on %(host)s" + ":%(port)s with SSL support") % self.__dict__) + + wsgi_kwargs = { + 'func': eventlet.wsgi.server, + 'sock': self._socket, + 'site': self.app, + 'protocol': self._protocol, + 'custom_pool': self._pool, + 'log': self._wsgi_logger, + 'log_format': CONF.wsgi_log_format + } + + if self._max_url_len: + wsgi_kwargs['url_length_limit'] = self._max_url_len + + self._server = eventlet.spawn(**wsgi_kwargs) + + def stop(self): + """Stop this server. + + This is not a very nice action, as currently the method by which a + server is stopped is by killing its eventlet. + + :returns: None + + """ + LOG.info(_("Stopping WSGI server.")) + + if self._server is not None: + # Resize pool to stop new requests from being processed + self._pool.resize(0) + self._server.kill() + + def wait(self): + """Block, until the server has stopped. + + Waits on the server's eventlet to finish, then returns. + + :returns: None + + """ + try: + self._server.wait() + except greenlet.GreenletExit: + LOG.info(_("WSGI server has stopped.")) + + +class Request(webob.Request): + pass + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. 
+ + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = gceapi.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + from gceapi.api import fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing, it shouldn't be necessary. + + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(detail='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, and so on) + res = Response() + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Middleware(Application): + """Base WSGI middleware. + + These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = gceapi.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + from gceapi.api import analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing, it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Debug(Middleware): + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response.
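+ + It could be enabled through a hypothetical paste filter section such + as: + + [filter:debug] + paste.filter_factory = gceapi.wsgi:Debug.factory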
+ + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + print(('*' * 40) + ' REQUEST ENVIRON') + for key, value in req.environ.items(): + print(key, '=', value) + print() + resp = req.get_response(self.application) + + print(('*' * 40) + ' RESPONSE HEADERS') + for (key, value) in resp.headers.iteritems(): + print(key, '=', value) + print() + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Iterator that prints the contents of a wrapper string.""" + print ('*' * 40) + ' BODY' + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be an object that can route + the request to the action-specific method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, '/svrlist', controller=sc, action='list') + + # Actions are all implicitly defined + mapper.resource('server', 'servers', controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch the request to the appropriate controller. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Loader(object): + """Used to load WSGI applications from paste configurations.""" + + def __init__(self, config_path=None): + """Initialize the loader, and attempt to find the config. + + :param config_path: Full or relative path to the paste config. + :returns: None + + """ + config_path = config_path or CONF.api_paste_config + if os.path.exists(config_path): + self.config_path = config_path + else: + self.config_path = CONF.find_file(config_path) + if not self.config_path: + raise exception.ConfigNotFound(path=config_path) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. 
+ :raises: `gceapi.exception.PasteAppNotFound` + + """ + try: + LOG.debug(_("Loading app %(name)s from %(path)s") % + {'name': name, 'path': self.config_path}) + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/gceapi/wsgi_ext.py b/gceapi/wsgi_ext.py new file mode 100644 index 0000000..62e58d3 --- /dev/null +++ b/gceapi/wsgi_ext.py @@ -0,0 +1,798 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 IBM Corp. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import math +import re +import routes +import time + +import webob + +from gceapi import exception +from gceapi.openstack.common import gettextutils +from gceapi.openstack.common.gettextutils import _ +from gceapi.openstack.common import jsonutils +from gceapi.openstack.common import log as logging +from gceapi import wsgi + + +LOG = logging.getLogger(__name__) + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.compute+json', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.compute+json': 'json', + 'application/json': 'json', +} + +_SANITIZE_KEYS = ['adminPass', 'admin_pass'] + +_SANITIZE_PATTERNS = [ + re.compile(r'(adminPass\s*[=]\s*[\"\']).*?([\"\'])', re.DOTALL), + re.compile(r'(admin_pass\s*[=]\s*[\"\']).*?([\"\'])', re.DOTALL), + re.compile(r'(<adminPass>).*?(</adminPass>)', re.DOTALL), + re.compile(r'(<admin_pass>).*?(</admin_pass>)', re.DOTALL), + re.compile(r'([\"\']adminPass[\"\']\s*:\s*[\"\']).*?([\"\'])', re.DOTALL), + re.compile(r'([\"\']admin_pass[\"\']\s*:\s*[\"\']).*?([\"\'])', re.DOTALL) +] + + +class APIMapper(routes.Mapper): + def routematch(self, url=None, environ=None): + if url == "": + result = self._match("", environ) + return result[0], result[1] + return routes.Mapper.routematch(self, url, environ) + + def connect(self, *args, **kargs): + # NOTE(vish): Default the format part of a route to only accept json + # and xml so it doesn't eat all characters after a '.' + # in the url.
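+ # e.g. without this requirement a GET for the hypothetical URL + # '/fake_project/zones/nova/disks/my.disk' would be routed with + # format='disk' and a truncated resource id.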
+ kargs.setdefault('requirements', {}) + if not kargs['requirements'].get('format'): + kargs['requirements']['format'] = 'json|xml' + return routes.Mapper.connect(self, *args, **kargs) + + +class ProjectMapper(APIMapper): + def resource(self, member_name, collection_name, **kwargs): + if 'parent_resource' not in kwargs: + kwargs['path_prefix'] = '{project_id}/' + else: + parent_resource = kwargs['parent_resource'] + p_collection = parent_resource['collection_name'] + p_member = parent_resource['member_name'] + kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, + p_member) + routes.Mapper.resource(self, member_name, + collection_name, + **kwargs) + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def __init__(self, *args, **kwargs): + super(Request, self).__init__(*args, **kwargs) + self._extension_data = {'db_items': {}} + + def cache_db_items(self, key, items, item_key='id'): + """ + Allow API methods to store objects from a DB query to be + used by API extensions within the same API request. + + An instance of this class only lives for the lifetime of a + single API request, so there's no need to implement full + cache management. + """ + db_items = self._extension_data['db_items'].setdefault(key, {}) + for item in items: + db_items[item[item_key]] = item + + def get_db_items(self, key): + """ + Allow an API extension to get previously stored objects within + the same API request. + + Note that the object data will be slightly stale. + """ + return self._extension_data['db_items'][key] + + def get_db_item(self, key, item_key): + """ + Allow an API extension to get a previously stored object + within the same API request. + + Note that the object data will be slightly stale. + """ + return self.get_db_items(key).get(item_key) + + def cache_db_instances(self, instances): + self.cache_db_items('instances', instances, 'uuid') + + def cache_db_instance(self, instance): + self.cache_db_items('instances', [instance], 'uuid') + + def get_db_instances(self): + return self.get_db_items('instances') + + def get_db_instance(self, instance_uuid): + return self.get_db_item('instances', instance_uuid) + + def cache_db_flavors(self, flavors): + self.cache_db_items('flavors', flavors, 'flavorid') + + def cache_db_flavor(self, flavor): + self.cache_db_items('flavors', [flavor], 'flavorid') + + def get_db_flavors(self): + return self.get_db_items('flavors') + + def get_db_flavor(self, flavorid): + return self.get_db_item('flavors', flavorid) + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'nova.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['nova.best_content_type'] = (content_type or + 'application/json') + + return self.environ['nova.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ + if "Content-Type" not in self.headers: + return None + + content_type = self.content_type + + # NOTE(markmc): text/plain is the default for eventlet and + # other webservers which use mimetools.Message.gettype() + # whereas twisted defaults to ''. + if not content_type or content_type == 'text/plain': + return None + + if content_type not in SUPPORTED_CONTENT_TYPES: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + def best_match_language(self): + """Determine the best available language for the request. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. + """ + if not self.accept_language: + return None + return self.accept_language.best_match( + gettextutils.get_available_languages('gceapi')) + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization.""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class DictSerializer(ActionDispatcher): + """Default request body serialization.""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return jsonutils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization.""" + + def default(self, data): + return jsonutils.dumps(data) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. + + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. 
+ + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, headers=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. + """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = headers or {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. + + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. + """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. + """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. 
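+ + Illustrative call (names assumed): + + robj = ResponseObject({"kind": "compute#zone"}) + response = robj.serialize(request, 'application/json', + dict(json=JSONDictSerializer))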
+ """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = str(value) + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. + """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + raise Fault(webob.exc.HTTPForbidden( + explanation=ex_value.format_message())) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, + explanation=ex_value.format_message())) + + # Under python 2.6, TypeError's exception value is actually a string, + # so test # here via ex_type instead: + # http://bugs.python.org/issue7853 + elif issubclass(ex_type, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_('Exception handling resource: %s') % ex_value, + exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +def sanitize(msg): + if not (key in msg for key in _SANITIZE_KEYS): + return msg + + for pattern in _SANITIZE_PATTERNS: + msg = re.sub(pattern, r'\1****\2', msg) + return msg + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). + They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. 
+ + """ + + def __init__(self, controller, **deserializers): + """ + :param controller: object that implement methods created by routes lib + """ + + self.controller = controller + + default_deserializers = dict(json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(json=JSONDictSerializer) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + if not content_type: + LOG.debug(_("No Content-Type provided in request")) + return None, '' + + if len(request.body) <= 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + if (hasattr(deserializer, 'want_controller') + and deserializer.want_controller): + return deserializer(self.controller).deserialize(body) + else: + return deserializer().deserialize(body) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. 
+ return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth = self.get_method(request, action, content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + if body: + msg = _("Action: '%(action)s', body: " + "%(body)s") % {'action': action, + 'body': unicode(body, 'utf-8')} + LOG.debug(sanitize(msg)) + LOG.debug(_("Calling method %s") % str(meth)) + + # Now, deserialize the request body... + try: + if content_type: + contents = self.deserialize(meth, content_type, body) + else: + contents = {} + except exception.InvalidContentType: + msg = _("Unsupported Content-Type") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Update the action args + action_args.update(contents) + + response = None + try: + with ResourceExceptionHandler(): + action_result = self.dispatch(meth, request, action_args) + except Fault as ex: + response = ex + + if not response: + # No exceptions; convert action_result into a + # ResponseObject + resp_obj = None + if type(action_result) is dict or action_result is None: + resp_obj = ResponseObject(action_result) + elif isinstance(action_result, ResponseObject): + resp_obj = action_result + else: + response = action_result + + # Run post-processing extensions + if resp_obj: + # Do a preserialize to set up the response object + serializers = getattr(meth, 'wsgi_serializers', {}) + resp_obj._bind_method_serializers(serializers) + if hasattr(meth, 'wsgi_code'): + resp_obj._default_code = meth.wsgi_code + resp_obj.preserialize(accept, self.default_serializers) + response = resp_obj.serialize(request, accept, + self.default_serializers) + return response + + def get_method(self, request, action, content_type, body): + """Look up the action-specific method and its extensions.""" + + # Look up the method + if self.controller: + return getattr(self.controller, action) + else: + return getattr(self, action) + + def dispatch(self, method, request, action_args): + """Dispatch a call to the action-specific method.""" + + return method(req=request, **action_args) + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = { + 400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 429: "overLimit", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + for key, value in self.wrapped_exc.headers.items(): + self.wrapped_exc.headers[key] = str(value) + self.status_int = exception.status_int + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + + user_locale = req.best_match_language() + # Replace the body with fault details. 
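+        # The serialized fault body will look like (illustrative values):
+        #     {"badRequest": {"code": 400, "message": "..."}}
+        # plus an optional "retryAfter" key for 413/429 responses.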
+        code = self.wrapped_exc.status_int
+        fault_name = self._fault_names.get(code, "computeFault")
+        explanation = self.wrapped_exc.explanation
+        LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
+                  {'code': code, 'explanation': explanation})
+
+        explanation = gettextutils.translate(explanation, user_locale)
+        fault_data = {
+            fault_name: {
+                'code': code,
+                'message': explanation}}
+        if code == 413 or code == 429:
+            retry = self.wrapped_exc.headers.get('Retry-After', None)
+            if retry:
+                fault_data[fault_name]['retryAfter'] = retry
+
+        content_type = req.best_match_content_type()
+        serializer = {
+            'application/json': JSONDictSerializer(),
+        }[content_type]
+
+        self.wrapped_exc.body = serializer.serialize(fault_data)
+        self.wrapped_exc.content_type = content_type
+
+        return self.wrapped_exc
+
+    def __str__(self):
+        return self.wrapped_exc.__str__()
+
+
+class RateLimitFault(webob.exc.HTTPException):
+    """Rate-limited request response."""
+
+    def __init__(self, message, details, retry_time):
+        """Initialize new `RateLimitFault` with relevant information."""
+        hdrs = RateLimitFault._retry_after(retry_time)
+        self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
+        self.content = {
+            "overLimit": {
+                "code": self.wrapped_exc.status_int,
+                "message": message,
+                "details": details,
+                "retryAfter": hdrs['Retry-After'],
+            },
+        }
+
+    @staticmethod
+    def _retry_after(retry_time):
+        delay = int(math.ceil(retry_time - time.time()))
+        retry_after = delay if delay > 0 else 0
+        headers = {'Retry-After': '%d' % retry_after}
+        return headers
+
+    @webob.dec.wsgify(RequestClass=Request)
+    def __call__(self, request):
+        """
+        Return the wrapped exception with a serialized body conforming to our
+        error format.
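+
+        The JSON body takes the form (values are illustrative):
+
+            {"overLimit": {"code": 429, "message": "...",
+                           "details": "...", "retryAfter": "10"}}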
+ """ + user_locale = request.best_match_language() + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}} + + self.content['overLimit']['message'] = \ + gettextutils.translate( + self.content['overLimit']['message'], + user_locale) + self.content['overLimit']['details'] = \ + gettextutils.translate( + self.content['overLimit']['details'], + user_locale) + + serializer = { + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + self.wrapped_exc.content_type = content_type + + return self.wrapped_exc diff --git a/openstack-common.conf b/openstack-common.conf new file mode 100644 index 0000000..8f8e19b --- /dev/null +++ b/openstack-common.conf @@ -0,0 +1,7 @@ +[DEFAULT] + +# The list of modules to copy from openstack-common +modules=db,db.sqlalchemy,eventlet_backdoor,gettextutils,excutils,jsonutils,local,timeutils + +# The base module to hold the copy of openstack.common +base=gceapi diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..3e2eb3a --- /dev/null +++ b/requirements.txt @@ -0,0 +1,26 @@ +anyjson>=0.3.3 +argparse +Babel>=1.3 +eventlet>=0.13.0 +greenlet>=0.3.2 +iso8601>=0.1.8 +jsonschema>=2.0.0,<3.0.0 +oslo.config>=1.2.0 +paramiko>=1.9.0 +Paste +PasteDeploy>=1.5.0 +pbr>=0.5.21,<1.0 +pyasn1 +python-cinderclient>=1.0.6 +python-glanceclient>=0.9.0 +python-keystoneclient>=0.4.2 +python-neutronclient>=2.3.3,<3 +python-novaclient>=2.15.0 +Routes>=1.12.3 +six>=1.4.1 +SQLAlchemy>=0.7.8,<=0.8.99 +sqlalchemy-migrate>=0.8.2 +stevedore>=0.14 +suds>=0.4 +WebOb>=1.2.3 +websockify>=0.5.1,<0.6 diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000..385d0df --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,123 @@ +#!/bin/bash + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Gceapi's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " --unittests-only Run unit tests only, exclude functional tests." + echo " -p, --flake8 Just run flake8" + echo " -P, --no-flake8 Don't run static code checks" + echo " -h, --help Print this usage message" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
+ exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) let always_venv=1; let never_venv=0;; + -N|--no-virtual-env) let always_venv=0; let never_venv=1;; + -p|--flake8) let just_flake8=1;; + -P|--no-flake8) let no_flake8=1;; + -f|--force) let force=1;; + -u|--update) update=1;; + --unittests-only) noseopts="$noseopts --exclude-dir=gceapi/tests/functional";; + -c|--coverage) noseopts="$noseopts --with-coverage --cover-package=gceapi";; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseopts= +noseargs= +wrapper="" +just_flake8=0 +no_flake8=0 +update=0 + +export NOSE_WITH_OPENSTACK=1 +export NOSE_OPENSTACK_COLOR=1 +export NOSE_OPENSTACK_RED=0.05 +export NOSE_OPENSTACK_YELLOW=0.025 +export NOSE_OPENSTACK_SHOW_ELAPSED=1 +export NOSE_OPENSTACK_STDOUT=1 + +for arg in "$@"; do + process_option $arg +done + +function run_tests { + # Cleanup *pyc + ${wrapper} find . -type f -name "*.pyc" -delete + # Just run the test suites in current environment + ${wrapper} rm -f tests.sqlite + ${wrapper} $NOSETESTS +} + +function run_flake8 { + echo "Running flake8 ..." + if [ $never_venv -eq 1 ]; then + echo "**WARNING**:" >&2 + echo "Running flake8 without virtual env may miss OpenStack HACKING detection" >&2 + fi + + ${wrapper} flake8 +} + + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + exit +fi + +run_tests || exit + +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + fi +fi diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..7e3c2de --- /dev/null +++ b/setup.cfg @@ -0,0 +1,69 @@ +[metadata] +name = gce-api +version = 2014.1.1 +summary = OpenStack Gceapi Service +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 2.6 + +[files] +packages = + gceapi +scripts = + bin/gceapi-db-setup + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +console_scripts = + gce-api=gceapi.cmd.api:main + gce-api-manage=gceapi.cmd.manage:main + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + +[compile_catalog] +directory = gceapi/locale +domain = gceapi + +[update_catalog] +domain = gceapi +output_dir = gceapi/locale +input_file = gceapi/locale/gceapi.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = gceapi/locale/gceapi.pot + +[nosetests] +# NOTE(jkoelker) To run the test suite under nose install the following +# coverage http://pypi.python.org/pypi/coverage +# tissue http://pypi.python.org/pypi/tissue (pep8 checker) +# openstack-nose https://github.com/jkoelker/openstack-nose +verbosity=2 +tests=gceapi/tests +cover-package = gceapi +cover-html = true +cover-erase = true diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..70c2b3f --- /dev/null +++ b/setup.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..e77512f --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,14 @@ +coverage>=3.6 +discover +feedparser +fixtures>=0.3.14 +hacking>=0.8.0,<0.9 +mox>=0.5.3 +mock>=1.0 +oslo.sphinx +psycopg2 +pylint==0.25.2 +python-subunit>=0.0.18 +sphinx>=1.1.2,<1.2 +testrepository>=0.0.17 +testtools>=0.9.34 diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py new file mode 100755 index 0000000..a550499 --- /dev/null +++ b/tools/db/schema_diff.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Utility for diff'ing two versions of the DB schema. + +Each release cycle the plan is to compact all of the migrations from that +release into a single file. This is a manual and, unfortunately, error-prone +process. To ensure that the schema doesn't change, this tool can be used to +diff the compacted DB schema to the original, uncompacted form. + + +The schema versions are specified by providing a git ref (a branch name or +commit hash) and a SQLAlchemy-Migrate version number: +Run like: + + ./tools/db/schema_diff.py mysql master:latest my_branch:82 +""" +import datetime +import glob +import os +import subprocess +import sys + + +### Dump + + +def dump_db(db_driver, db_name, migration_version, dump_filename): + db_driver.create(db_name) + try: + migrate(db_driver, db_name, migration_version) + db_driver.dump(db_name, dump_filename) + finally: + db_driver.drop(db_name) + + +### Diff + + +def diff_files(filename1, filename2): + pipeline = ['diff -U 3 %(filename1)s %(filename2)s' % locals()] + + # Use colordiff if available + if subprocess.call(['which', 'colordiff']) == 0: + pipeline.append('colordiff') + + pipeline.append('less -R') + + cmd = ' | '.join(pipeline) + subprocess.check_call(cmd, shell=True) + + +### Database + + +class MySQL(object): + def create(self, name): + subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name]) + + def drop(self, name): + subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name]) + + def dump(self, name, dump_filename): + subprocess.check_call( + 'mysqldump -u root %(name)s > %(dump_filename)s' % locals(), + shell=True) + + def url(self, name): + return 'mysql://root@localhost/%s' % name + + +class Postgres(object): + def create(self, name): + subprocess.check_call(['createdb', name]) + + def drop(self, name): + subprocess.check_call(['dropdb', name]) + + def dump(self, name, dump_filename): + subprocess.check_call( + 'pg_dump %(name)s > %(dump_filename)s' % locals(), + shell=True) + + def url(self, name): + return 'postgres://localhost/%s' % name + + +def _get_db_driver_class(db_type): + if db_type == "mysql": + return MySQL + elif db_type == "postgres": + return Postgres + 
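+    # (Supporting another engine only requires a driver class exposing the
+    # same create/drop/dump/url interface as MySQL and Postgres above.)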
+    else:
+        raise Exception(_("database %s not supported") % db_type)
+
+
+### Migrate
+
+
+MIGRATE_REPO = os.path.join(os.getcwd(), "gceapi/db/sqlalchemy/migrate_repo")
+
+
+def migrate(db_driver, db_name, migration_version):
+    earliest_version = _migrate_get_earliest_version()
+
+    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
+    # migration numbers.
+    _migrate_cmd(
+        db_driver, db_name, 'version_control', str(earliest_version - 1))
+
+    upgrade_cmd = ['upgrade']
+    if migration_version != 'latest':
+        upgrade_cmd.append(str(migration_version))
+
+    _migrate_cmd(db_driver, db_name, *upgrade_cmd)
+
+
+def _migrate_cmd(db_driver, db_name, *cmd):
+    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
+
+    args = ['python', manage_py]
+    args += cmd
+    args += ['--repository=%s' % MIGRATE_REPO,
+             '--url=%s' % db_driver.url(db_name)]
+
+    subprocess.check_call(args)
+
+
+def _migrate_get_earliest_version():
+    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
+
+    versions = []
+    for path in glob.iglob(versions_glob):
+        filename = os.path.basename(path)
+        prefix = filename.split('_', 1)[0]
+        try:
+            version = int(prefix)
+        except ValueError:
+            # Skip files whose names don't start with a version number.
+            continue
+        versions.append(version)
+
+    versions.sort()
+    return versions[0]
+
+
+### Git
+
+
+def git_current_branch_name():
+    ref_name = git_symbolic_ref('HEAD', quiet=True)
+    current_branch_name = ref_name.replace('refs/heads/', '')
+    return current_branch_name
+
+
+def git_symbolic_ref(ref, quiet=False):
+    args = ['git', 'symbolic-ref', ref]
+    if quiet:
+        args.append('-q')
+    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+    stdout, stderr = proc.communicate()
+    return stdout.strip()
+
+
+def git_checkout(branch_name):
+    subprocess.check_call(['git', 'checkout', branch_name])
+
+
+def git_has_uncommitted_changes():
+    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
+
+
+### Command
+
+
+def die(msg):
+    print >> sys.stderr, "ERROR: %s" % msg
+    sys.exit(1)
+
+
+def usage(msg=None):
+    if msg:
+        print >> sys.stderr, "ERROR: %s" % msg
+
+    prog = "schema_diff.py"
+    args = ["<db_type>", "<orig_branch:orig_version>",
+            "<new_branch:new_version>"]
+
+    print >> sys.stderr, "usage: %s %s" % (prog, ' '.join(args))
+    sys.exit(1)
+
+
+def parse_options():
+    try:
+        db_type = sys.argv[1]
+    except IndexError:
+        usage("must specify DB type")
+
+    try:
+        orig_branch, orig_version = sys.argv[2].split(':')
+    except (IndexError, ValueError):
+        usage('original branch and version required (e.g. master:82)')
+
+    try:
+        new_branch, new_version = sys.argv[3].split(':')
+    except (IndexError, ValueError):
+        usage('new branch and version required (e.g. master:82)')
+
+    return db_type, orig_branch, orig_version, new_branch, new_version
+
+
+def main():
+    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
+
+    ORIG_DB = 'orig_db_%s' % timestamp
+    NEW_DB = 'new_db_%s' % timestamp
+
+    ORIG_DUMP = ORIG_DB + ".dump"
+    NEW_DUMP = NEW_DB + ".dump"
+
+    options = parse_options()
+    db_type, orig_branch, orig_version, new_branch, new_version = options
+
+    # Since we're going to be switching branches, ensure user doesn't have any
+    # uncommitted changes
+    if git_has_uncommitted_changes():
+        die("You have uncommitted changes. Please commit them before running "
+            "this command.")
+
+    db_driver = _get_db_driver_class(db_type)()
+
+    users_branch = git_current_branch_name()
+    git_checkout(orig_branch)
+
+    try:
+        # Dump Original Schema
+        dump_db(db_driver, ORIG_DB, orig_version, ORIG_DUMP)
+
+        # Dump New Schema
+        git_checkout(new_branch)
+        dump_db(db_driver, NEW_DB, new_version, NEW_DUMP)
+
+        diff_files(ORIG_DUMP, NEW_DUMP)
+    finally:
+        git_checkout(users_branch)
+
+        if os.path.exists(ORIG_DUMP):
+            os.unlink(ORIG_DUMP)
+
+        if os.path.exists(NEW_DUMP):
+            os.unlink(NEW_DUMP)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/enable-pre-commit-hook.sh b/tools/enable-pre-commit-hook.sh
new file mode 100755
index 0000000..b21a635
--- /dev/null
+++ b/tools/enable-pre-commit-hook.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+PRE_COMMIT_SCRIPT=.git/hooks/pre-commit
+
+make_hook() {
+    echo "exec ./run_tests.sh -N -p" >> $PRE_COMMIT_SCRIPT
+    chmod +x $PRE_COMMIT_SCRIPT
+
+    if [ -w $PRE_COMMIT_SCRIPT -a -x $PRE_COMMIT_SCRIPT ]; then
+        echo "pre-commit hook was created successfully"
+    else
+        echo "unable to create pre-commit hook"
+    fi
+}
+
+# NOTE(jk0): Make sure we are in gce-api's root directory before adding the hook.
+if [ ! -d ".git" ]; then
+    echo "unable to find .git; moving up a directory"
+    cd ..
+    if [ -d ".git" ]; then
+        make_hook
+    else
+        echo "still unable to find .git; hook not created"
+    fi
+else
+    make_hook
+fi
+
diff --git a/tools/install_venv.py b/tools/install_venv.py
new file mode 100644
index 0000000..ae2e37b
--- /dev/null
+++ b/tools/install_venv.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+import install_venv_common as install_venv
+
+
+def print_help(venv, root):
+    help = """
+    Gce-api development environment setup is complete.
+
+    Gce-api development uses virtualenv to track and manage Python
+    dependencies while in development and testing.
+
+    To activate the Gce-api virtualenv for the extent of your current shell
+    session you can run:
+
+    $ source %s/bin/activate
+
+    Or, if you prefer, you can run commands in the virtualenv on a case by
+    case basis by running:
+
+    $ %s/tools/with_venv.sh <your command>
+
+    Also, run_tests.sh will automatically use the virtualenv.
+    """
+    print help % (venv, root)
+
+
+def main(argv):
+    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+
+    if os.environ.get('tools_path'):
+        root = os.environ['tools_path']
+    venv = os.path.join(root, '.venv')
+    if os.environ.get('venv'):
+        venv = os.environ['venv']
+
+    pip_requires = os.path.join(root, 'requirements.txt')
+    test_requires = os.path.join(root, 'test-requirements.txt')
+    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+    project = 'Gce-api'
+    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
+                                       py_version, project)
+    options = install.parse_args(argv)
+    install.check_python_version()
+    install.check_dependencies()
+    install.create_virtualenv(no_site_packages=options.no_site_packages)
+    install.install_dependencies()
+    install.post_process()
+    print_help(venv, root)
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
new file mode 100644
index 0000000..92d66ae
--- /dev/null
+++ b/tools/install_venv_common.py
@@ -0,0 +1,213 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provides methods needed by the installation script for OpenStack
+development virtual environments.
+
+Since this script is used to bootstrap a virtualenv from the system's Python
+environment, it should be kept strictly compatible with Python 2.6.
+
+Synced in from openstack-common
+"""
+
+from __future__ import print_function
+
+import optparse
+import os
+import subprocess
+import sys
+
+
+class InstallVenv(object):
+
+    def __init__(self, root, venv, requirements,
+                 test_requirements, py_version,
+                 project):
+        self.root = root
+        self.venv = venv
+        self.requirements = requirements
+        self.test_requirements = test_requirements
+        self.py_version = py_version
+        self.project = project
+
+    def die(self, message, *args):
+        print(message % args, file=sys.stderr)
+        sys.exit(1)
+
+    def check_python_version(self):
+        if sys.version_info < (2, 6):
+            self.die("Need Python Version >= 2.6")
+
+    def run_command_with_code(self, cmd, redirect_output=True,
+                              check_exit_code=True):
+        """Runs a command in an out-of-process shell.
+
+        Returns the output of that command. Working directory is self.root.
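+
+        Illustrative call (the command itself is hypothetical):
+
+            output, returncode = self.run_command_with_code(
+                ['git', 'status'], check_exit_code=False)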
+ """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + else: + return Distro( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print('Creating venv...', end=' ') + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print('done.') + else: + print("venv already exists...") + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print('Installing dependencies with pip (this can take a while)...') + + # First things first, make sure our venv has the latest pip and + # setuptools and pbr + self.pip_install('pip>=1.4') + self.pip_install('setuptools') + self.pip_install('pbr') + + self.pip_install('-r', self.requirements, '-r', self.test_requirements) + + def post_process(self): + self.get_distro().post_process() + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = optparse.OptionParser() + parser.add_option('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install") + return parser.parse_args(argv[1:])[0] + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print('Installing virtualenv via easy_install...', end=' ') + if self.run_command(['easy_install', 'virtualenv']): + print('Succeeded') + return + else: + print('Failed') + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + def post_process(self): + """Any distribution-specific post-processing gets done here. + + In particular, this is useful for applying patches to code inside + the venv. + """ + pass + + +class Fedora(Distro): + """This covers all Fedora-based distributions. 
+ + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def apply_patch(self, originalfile, patchfile): + self.run_command(['patch', '-N', originalfile, patchfile], + check_exit_code=False) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.die("Please install 'python-virtualenv'.") + + super(Fedora, self).install_virtualenv() + + def post_process(self): + """Workaround for a bug in eventlet. + + This currently affects RHEL6.1, but the fix can safely be + applied to all RHEL and Fedora distributions. + + This can be removed when the fix is applied upstream. + + Nova: https://bugs.launchpad.net/nova/+bug/884915 + Upstream: https://bitbucket.org/eventlet/eventlet/issue/89 + RHEL: https://bugzilla.redhat.com/958868 + """ + + if os.path.exists('contrib/redhat-eventlet.patch'): + # Install "patch" program if it's not there + if not self.check_pkg('patch'): + self.die("Please install 'patch'.") + + # Apply the eventlet patch + self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, + 'site-packages', + 'eventlet/green/subprocess.py'), + 'contrib/redhat-eventlet.patch') diff --git a/tools/lintstack.py b/tools/lintstack.py new file mode 100755 index 0000000..15dda49 --- /dev/null +++ b/tools/lintstack.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012, AT&T Labs, Yun Mao +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""pylint error checking.""" + +import cStringIO as StringIO +import json +import re +import sys + +from pylint import lint +from pylint.reporters import text + +# Note(maoy): E1103 is error code related to partial type inference +ignore_codes = ["E1103"] +# Note(maoy): the error message is the pattern of E0202. It should be ignored +# for gceapi.tests modules +ignore_messages = ["An attribute affected in gceapi.tests"] +# Note(maoy): we ignore all errors in openstack.common because it should be +# checked elsewhere. We also ignore gceapi.tests for now due to high false +# positive rate. +ignore_modules = ["gceapi/openstack/common/", "gceapi/tests/"] + +KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" + + +class LintOutput(object): + + _cached_filename = None + _cached_content = None + + def __init__(self, filename, lineno, line_content, code, message, + lintoutput): + self.filename = filename + self.lineno = lineno + self.line_content = line_content + self.code = code + self.message = message + self.lintoutput = lintoutput + + @classmethod + def from_line(cls, line): + m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] 
(.*)", line) + matched = m.groups() + filename, lineno, code, message = (matched[0], int(matched[1]), + matched[2], matched[-1]) + if cls._cached_filename != filename: + with open(filename) as f: + cls._cached_content = list(f.readlines()) + cls._cached_filename = filename + line_content = cls._cached_content[lineno - 1].rstrip() + return cls(filename, lineno, line_content, code, message, + line.rstrip()) + + @classmethod + def from_msg_to_dict(cls, msg): + """From the output of pylint msg, to a dict, where each key + is a unique error identifier, value is a list of LintOutput + """ + result = {} + for line in msg.splitlines(): + obj = cls.from_line(line) + if obj.is_ignored(): + continue + key = obj.key() + if key not in result: + result[key] = [] + result[key].append(obj) + return result + + def is_ignored(self): + if self.code in ignore_codes: + return True + if any(self.filename.startswith(name) for name in ignore_modules): + return True + if any(msg in self.message for msg in ignore_messages): + return True + return False + + def key(self): + if self.code in ["E1101", "E1103"]: + # These two types of errors are like Foo class has no member bar. + # We discard the source code so that the error will be ignored + # next time another Foo.bar is encountered. + return self.message, "" + return self.message, self.line_content.strip() + + def json(self): + return json.dumps(self.__dict__) + + def review_str(self): + return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" + "%(code)s: %(message)s" % self.__dict__) + + +class ErrorKeys(object): + + @classmethod + def print_json(cls, errors, output=sys.stdout): + print >>output, "# automatically generated by tools/lintstack.py" + for i in sorted(errors.keys()): + print >>output, json.dumps(i) + + @classmethod + def from_file(cls, filename): + keys = set() + for line in open(filename): + if line and line[0] != "#": + d = json.loads(line) + keys.add(tuple(d)) + return keys + + +def run_pylint(): + buff = StringIO.StringIO() + reporter = text.ParseableTextReporter(output=buff) + args = ["--include-ids=y", "-E", "gceapi"] + lint.Run(args, reporter=reporter, exit=False) + val = buff.getvalue() + buff.close() + return val + + +def generate_error_keys(msg=None): + print "Generating", KNOWN_PYLINT_EXCEPTIONS_FILE + if msg is None: + msg = run_pylint() + errors = LintOutput.from_msg_to_dict(msg) + with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: + ErrorKeys.print_json(errors, output=f) + + +def validate(newmsg=None): + print "Loading", KNOWN_PYLINT_EXCEPTIONS_FILE + known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) + if newmsg is None: + print "Running pylint. Be patient..." + newmsg = run_pylint() + errors = LintOutput.from_msg_to_dict(newmsg) + + print "Unique errors reported by pylint: was %d, now %d." \ + % (len(known), len(errors)) + passed = True + for err_key, err_list in errors.items(): + for err in err_list: + if err_key not in known: + print err.lintoutput + print + passed = False + if passed: + print "Congrats! pylint check passed." + redundant = known - set(errors.keys()) + if redundant: + print "Extra credit: some known pylint exceptions disappeared." + for i in sorted(redundant): + print json.dumps(i) + print "Consider regenerating the exception file if you will." + else: + print ("Please fix the errors above. 
If you believe they are false" + " positives, run 'tools/lintstack.py generate' to overwrite.") + sys.exit(1) + + +def usage(): + print """Usage: tools/lintstack.py [generate|validate] + To generate pylint_exceptions file: tools/lintstack.py generate + To validate the current commit: tools/lintstack.py + """ + + +def main(): + option = "validate" + if len(sys.argv) > 1: + option = sys.argv[1] + if option == "generate": + generate_error_keys() + elif option == "validate": + validate() + else: + usage() + + +if __name__ == "__main__": + main() diff --git a/tools/lintstack.sh b/tools/lintstack.sh new file mode 100755 index 0000000..d8591d0 --- /dev/null +++ b/tools/lintstack.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +# Copyright (c) 2012-2013, AT&T Labs, Yun Mao +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Use lintstack.py to compare pylint errors. +# We run pylint twice, once on HEAD, once on the code before the latest +# commit for review. +set -e +TOOLS_DIR=$(cd $(dirname "$0") && pwd) +# Get the current branch name. +GITHEAD=`git rev-parse --abbrev-ref HEAD` +if [[ "$GITHEAD" == "HEAD" ]]; then + # In detached head mode, get revision number instead + GITHEAD=`git rev-parse HEAD` + echo "Currently we are at commit $GITHEAD" +else + echo "Currently we are at branch $GITHEAD" +fi + +cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py + +if git rev-parse HEAD^2 2>/dev/null; then + # The HEAD is a Merge commit. Here, the patch to review is + # HEAD^2, the master branch is at HEAD^1, and the patch was + # written based on HEAD^2~1. + PREV_COMMIT=`git rev-parse HEAD^2~1` + git checkout HEAD~1 + # The git merge is necessary for reviews with a series of patches. + # If not, this is a no-op so won't hurt either. + git merge $PREV_COMMIT +else + # The HEAD is not a merge commit. This won't happen on gerrit. + # Most likely you are running against your own patch locally. + # We assume the patch to examine is HEAD, and we compare it against + # HEAD~1 + git checkout HEAD~1 +fi + +# First generate tools/pylint_exceptions from HEAD~1 +$TOOLS_DIR/lintstack.head.py generate +# Then use that as a reference to compare against HEAD +git checkout $GITHEAD +$TOOLS_DIR/lintstack.head.py +echo "Check passed. FYI: the pylint exceptions are:" +cat $TOOLS_DIR/pylint_exceptions + diff --git a/tools/patch_tox_venv.py b/tools/patch_tox_venv.py new file mode 100644 index 0000000..ba3515b --- /dev/null +++ b/tools/patch_tox_venv.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +import install_venv_common as install_venv # noqa + + +def first_file(file_list): + for candidate in file_list: + if os.path.exists(candidate): + return candidate + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + + venv = os.environ['VIRTUAL_ENV'] + + pip_requires = first_file([ + os.path.join(root, 'requirements.txt'), + os.path.join(root, 'tools', 'pip-requires'), + ]) + test_requires = first_file([ + os.path.join(root, 'test-requirements.txt'), + os.path.join(root, 'tools', 'test-requires'), + ]) + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'gce-api' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + #NOTE(dprince): For Tox we only run post_process (which patches files, etc) + install.post_process() + +if __name__ == '__main__': + main(sys.argv) diff --git a/tools/regression_tester.py b/tools/regression_tester.py new file mode 100755 index 0000000..d78cf6e --- /dev/null +++ b/tools/regression_tester.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Tool for checking if patch contains a regression test. + +By default runs against current patch but can be set to use any gerrit review +as specified by change number (uses 'git review -d'). + +Idea: take tests from patch to check, and run against code from previous patch. +If new tests pass, then no regression test, if new tests fails against old code +then either +* new tests depend on new code and cannot confirm regression test is valid + (false positive) +* new tests detects the bug being fixed (detect valid regression test) +Due to the risk of false positives, the results from this need some human +interpretation. +""" + +import optparse +import string +import subprocess +import sys + + +def run(cmd, fail_ok=False): + print "running: %s" % cmd + obj = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + shell=True) + obj.wait() + if obj.returncode != 0 and not fail_ok: + print "The above command terminated with an error." + sys.exit(obj.returncode) + return obj.stdout.read() + + +def main(): + usage = """ + Tool for checking if a patch includes a regression test. 
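+
+    By default the check runs against the latest commit on the current
+    branch; pass --review to fetch a gerrit change first, e.g.
+    '%prog --review 12345' (the review number is hypothetical).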
+
+    Usage: %prog [options]"""
+    parser = optparse.OptionParser(usage)
+    parser.add_option("-r", "--review", dest="review",
+                      help="gerrit review number to test")
+    (options, args) = parser.parse_args()
+    if options.review:
+        original_branch = run("git rev-parse --abbrev-ref HEAD")
+        run("git review -d %s" % options.review)
+    else:
+        print ("no gerrit review number specified, running on latest commit "
+               "on current branch.")
+
+    test_works = False
+
+    # run new tests with old code
+    run("git checkout HEAD^ gceapi")
+    run("git checkout HEAD gceapi/tests")
+
+    # identify which tests have changed
+    tests = run("git whatchanged --format=oneline -1 | grep \"gceapi/tests\" "
+                "| cut -f2").split()
+    test_list = []
+    for test in tests:
+        test_list.append(string.replace(test[0:-3], '/', '.'))
+
+    if test_list == []:
+        test_works = False
+        expect_failure = ""
+    else:
+        # run new tests, expect them to fail
+        expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
+                             fail_ok=True)
+        if "FAILED (id=" in expect_failure:
+            test_works = True
+
+    # cleanup
+    run("git checkout HEAD gceapi")
+    if options.review:
+        new_branch = run("git status | head -1 | cut -d ' ' -f 4")
+        run("git checkout %s" % original_branch)
+        run("git branch -D %s" % new_branch)
+
+    print expect_failure
+    print ""
+    print "*******************************"
+    if test_works:
+        print "FOUND a regression test"
+    else:
+        print "NO regression test"
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
new file mode 100755
index 0000000..94e05c1
--- /dev/null
+++ b/tools/with_venv.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+tools_path=${tools_path:-$(dirname $0)}
+venv_path=${venv_path:-${tools_path}}
+venv_dir=${venv_name:-/../.venv}
+TOOLS=${tools_path}
+VENV=${venv:-${venv_path}/${venv_dir}}
+source ${VENV}/bin/activate && "$@"
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..e9a7039
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,56 @@
+[tox]
+minversion = 1.6
+envlist = py26,py27,py33,pep8
+skipsdist = True
+
+[testenv]
+sitepackages = True
+usedevelop = True
+install_command = pip install -U {opts} {packages}
+setenv = VIRTUAL_ENV={envdir}
+         LANG=en_US.UTF-8
+         LANGUAGE=en_US:en
+         LC_ALL=C
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+commands =
+  python tools/patch_tox_venv.py
+  python setup.py testr --slowest --testr-args='{posargs}'
+
+[tox:jenkins]
+sitepackages = True
+downloadcache = ~/cache/pip
+
+[testenv:pep8]
+sitepackages = False
+commands =
+  flake8 {posargs}
+
+[testenv:pylint]
+setenv = VIRTUAL_ENV={envdir}
+commands = bash tools/lintstack.sh
+
+[testenv:cover]
+# Also do not run test_coverage_ext tests while gathering coverage as those
+# tests conflict with coverage.
+setenv = VIRTUAL_ENV={envdir}
+commands =
+  python tools/patch_tox_venv.py
+  python setup.py testr --coverage \
+    --testr-args='^(?!.*test.*coverage).*$'
+
+[testenv:venv]
+commands = {posargs}
+
+[flake8]
+# E712 is ignored on purpose, since it is normal to use 'column == true'
+# in sqlalchemy.
+# H803 skipped on purpose per list discussion.
+# TODO Hacking 0.6 checks to fix
+# H102  Apache 2.0 license header not found
+
+ignore = E121,E122,E123,E124,E126,E127,E128,E711,E712,H102,H303,H404,F403,F811,F841,H803
+exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
+
+[hacking]
+import_exceptions = gceapi.openstack.common.gettextutils._
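+# NOTE: the exception above lets modules do
+# 'from gceapi.openstack.common.gettextutils import _' for the translation
+# marker without tripping the hacking import checks.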