commit 90b153316b0e9b7a35694a54446b33844a462ccb
Author: Liam Young
Date:   Tue Oct 18 11:07:48 2016 +0000

    First cut

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d432e65
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+build
+layers
+.tox
+interfaces
+builds
+deps
+.testrepository
+__pycache__
+*.pyc
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..8d3d204
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+pbr>=1.8.0,<1.9.0
+PyYAML>=3.1.0
+simplejson>=2.2.0
+netifaces>=0.10.4
+netaddr>=0.7.12,!=0.7.16
+Jinja2>=2.6 # BSD License (3 clause)
+six>=1.9.0
+dnspython>=1.12.0
+psutil>=1.1.1,<2.0.0
\ No newline at end of file
diff --git a/src/README.md b/src/README.md
new file mode 100644
index 0000000..20d8b94
--- /dev/null
+++ b/src/README.md
@@ -0,0 +1,23 @@
+# Overview
+
+This subordinate charm provides ...
+
+
+# Usage
+
+With the OpenStack nova-compute and neutron-gateway charms:
+
+    juju deploy ...
+    juju deploy neutron-gateway
+    juju add-relation nova-compute ...
+    juju add-relation neutron-gateway ...
+
+# Configuration Options
+
+This charm will optionally configure the local ip address of the OVS instance to something other than the 'private-address' provided by Juju:
+
+    juju set ... os-data-network=10.20.3.0/21
+
+
+# Restrictions
+
diff --git a/src/config.yaml b/src/config.yaml
new file mode 100644
index 0000000..7fc91a3
--- /dev/null
+++ b/src/config.yaml
@@ -0,0 +1,10 @@
+options:
+  os-data-network:
+    type: string
+    default:
+    description: |
+      The IP address and netmask of the OpenStack Data network (e.g.,
+      192.168.0.0/24)
+      .
+      This network will be used for tenant network traffic in overlay
+      networks.
\ No newline at end of file
diff --git a/src/copyright b/src/copyright
new file mode 100644
index 0000000..6f8a25f
--- /dev/null
+++ b/src/copyright
@@ -0,0 +1,16 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
+
+Files: *
+Copyright: 2015, Canonical Ltd.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
\ No newline at end of file
diff --git a/src/files/mapping.json b/src/files/mapping.json
new file mode 100644
index 0000000..cbaf077
--- /dev/null
+++ b/src/files/mapping.json
@@ -0,0 +1,1268 @@
+{
+ "_comment": "Mapping OpenStack action namespaces to all its actions.
Each action name is mapped to python-client method name in this namespace.", + "nova": { + "_comment": "It uses novaclient.v2.", + "agents_convert_into_with_meta": "agents.convert_into_with_meta", + "agents_create": "agents.create", + "agents_delete": "agents.delete", + "agents_find": "agents.find", + "agents_findall": "agents.findall", + "agents_list": "agents.list", + "agents_update": "agents.update", + "aggregates_add_host": "aggregates.add_host", + "aggregates_convert_into_with_meta": "aggregates.convert_into_with_meta", + "aggregates_create": "aggregates.create", + "aggregates_delete": "aggregates.delete", + "aggregates_find": "aggregates.find", + "aggregates_findall": "aggregates.findall", + "aggregates_get": "aggregates.get", + "aggregates_get_details": "aggregates.get_details", + "aggregates_list": "aggregates.list", + "aggregates_remove_host": "aggregates.remove_host", + "aggregates_set_metadata": "aggregates.set_metadata", + "aggregates_update": "aggregates.update", + "availability_zones_convert_into_with_meta": "availability_zones.convert_into_with_meta", + "availability_zones_find": "availability_zones.find", + "availability_zones_findall": "availability_zones.findall", + "availability_zones_list": "availability_zones.list", + "certs_convert_into_with_meta": "certs.convert_into_with_meta", + "certs_create": "certs.create", + "certs_get": "certs.get", + "cloudpipe_convert_into_with_meta": "cloudpipe.convert_into_with_meta", + "cloudpipe_create": "cloudpipe.create", + "cloudpipe_find": "cloudpipe.find", + "cloudpipe_findall": "cloudpipe.findall", + "cloudpipe_list": "cloudpipe.list", + "cloudpipe_update": "cloudpipe.update", + "dns_domains_convert_into_with_meta": "dns_domains.convert_into_with_meta", + "dns_domains_create_private": "dns_domains.create_private", + "dns_domains_create_public": "dns_domains.create_public", + "dns_domains_delete": "dns_domains.delete", + "dns_domains_domains": "dns_domains.domains", + "dns_entries_convert_into_with_meta": "dns_entries.convert_into_with_meta", + "dns_entries_create": "dns_entries.create", + "dns_entries_delete": "dns_entries.delete", + "dns_entries_get": "dns_entries.get", + "dns_entries_get_for_ip": "dns_entries.get_for_ip", + "dns_entries_modify_ip": "dns_entries.modify_ip", + "fixed_ips_convert_into_with_meta": "fixed_ips.convert_into_with_meta", + "fixed_ips_get": "fixed_ips.get", + "fixed_ips_reserve": "fixed_ips.reserve", + "fixed_ips_unreserve": "fixed_ips.unreserve", + "flavor_access_add_tenant_access": "flavor_access.add_tenant_access", + "flavor_access_convert_into_with_meta": "flavor_access.convert_into_with_meta", + "flavor_access_find": "flavor_access.find", + "flavor_access_findall": "flavor_access.findall", + "flavor_access_list": "flavor_access.list", + "flavor_access_remove_tenant_access": "flavor_access.remove_tenant_access", + "flavors_convert_into_with_meta": "flavors.convert_into_with_meta", + "flavors_create": "flavors.create", + "flavors_delete": "flavors.delete", + "flavors_find": "flavors.find", + "flavors_findall": "flavors.findall", + "flavors_get": "flavors.get", + "flavors_list": "flavors.list", + "floating_ip_pools_convert_into_with_meta": "floating_ip_pools.convert_into_with_meta", + "floating_ip_pools_find": "floating_ip_pools.find", + "floating_ip_pools_findall": "floating_ip_pools.findall", + "floating_ip_pools_list": "floating_ip_pools.list", + "floating_ips_convert_into_with_meta": "floating_ips.convert_into_with_meta", + "floating_ips_create": "floating_ips.create", + "floating_ips_delete": 
"floating_ips.delete", + "floating_ips_find": "floating_ips.find", + "floating_ips_findall": "floating_ips.findall", + "floating_ips_get": "floating_ips.get", + "floating_ips_list": "floating_ips.list", + "floating_ips_bulk_convert_into_with_meta": "floating_ips_bulk.convert_into_with_meta", + "floating_ips_bulk_create": "floating_ips_bulk.create", + "floating_ips_bulk_delete": "floating_ips_bulk.delete", + "floating_ips_bulk_find": "floating_ips_bulk.find", + "floating_ips_bulk_findall": "floating_ips_bulk.findall", + "floating_ips_bulk_list": "floating_ips_bulk.list", + "fping_convert_into_with_meta": "fping.convert_into_with_meta", + "fping_find": "fping.find", + "fping_findall": "fping.findall", + "fping_get": "fping.get", + "fping_list": "fping.list", + "hosts_convert_into_with_meta": "hosts.convert_into_with_meta", + "hosts_find": "hosts.find", + "hosts_findall": "hosts.findall", + "hosts_get": "hosts.get", + "hosts_host_action": "hosts.host_action", + "hosts_list": "hosts.list", + "hosts_list_all": "hosts.list_all", + "hosts_update": "hosts.update", + "hypervisor_stats_convert_into_with_meta": "hypervisor_stats.convert_into_with_meta", + "hypervisor_stats_statistics": "hypervisor_stats.statistics", + "hypervisors_convert_into_with_meta": "hypervisors.convert_into_with_meta", + "hypervisors_find": "hypervisors.find", + "hypervisors_findall": "hypervisors.findall", + "hypervisors_get": "hypervisors.get", + "hypervisors_list": "hypervisors.list", + "hypervisors_search": "hypervisors.search", + "hypervisors_statistics": "hypervisors.statistics", + "hypervisors_uptime": "hypervisors.uptime", + "images_convert_into_with_meta": "images.convert_into_with_meta", + "images_delete": "images.delete", + "images_delete_meta": "images.delete_meta", + "images_find": "images.find", + "images_findall": "images.findall", + "images_get": "images.get", + "images_list": "images.list", + "images_set_meta": "images.set_meta", + "keypairs_convert_into_with_meta": "keypairs.convert_into_with_meta", + "keypairs_create": "keypairs.create", + "keypairs_delete": "keypairs.delete", + "keypairs_find": "keypairs.find", + "keypairs_findall": "keypairs.findall", + "keypairs_get": "keypairs.get", + "keypairs_list": "keypairs.list", + "limits_convert_into_with_meta": "limits.convert_into_with_meta", + "limits_get": "limits.get", + "networks_add": "networks.add", + "networks_associate_host": "networks.associate_host", + "networks_associate_project": "networks.associate_project", + "networks_convert_into_with_meta": "networks.convert_into_with_meta", + "networks_create": "networks.create", + "networks_delete": "networks.delete", + "networks_disassociate": "networks.disassociate", + "networks_find": "networks.find", + "networks_findall": "networks.findall", + "networks_get": "networks.get", + "networks_list": "networks.list", + "quota_classes_convert_into_with_meta": "quota_classes.convert_into_with_meta", + "quota_classes_get": "quota_classes.get", + "quota_classes_update": "quota_classes.update", + "quotas_convert_into_with_meta": "quotas.convert_into_with_meta", + "quotas_defaults": "quotas.defaults", + "quotas_delete": "quotas.delete", + "quotas_get": "quotas.get", + "quotas_update": "quotas.update", + "security_group_default_rules_convert_into_with_meta": "security_group_default_rules.convert_into_with_meta", + "security_group_default_rules_create": "security_group_default_rules.create", + "security_group_default_rules_delete": "security_group_default_rules.delete", + "security_group_default_rules_list": 
"security_group_default_rules.list", + "security_group_rules_convert_into_with_meta": "security_group_rules.convert_into_with_meta", + "security_group_rules_create": "security_group_rules.create", + "security_group_rules_delete": "security_group_rules.delete", + "security_groups_convert_into_with_meta": "security_groups.convert_into_with_meta", + "security_groups_create": "security_groups.create", + "security_groups_delete": "security_groups.delete", + "security_groups_find": "security_groups.find", + "security_groups_findall": "security_groups.findall", + "security_groups_get": "security_groups.get", + "security_groups_list": "security_groups.list", + "security_groups_update": "security_groups.update", + "server_groups_convert_into_with_meta": "server_groups.convert_into_with_meta", + "server_groups_create": "server_groups.create", + "server_groups_delete": "server_groups.delete", + "server_groups_find": "server_groups.find", + "server_groups_findall": "server_groups.findall", + "server_groups_get": "server_groups.get", + "server_groups_list": "server_groups.list", + "server_migrations_convert_into_with_meta": "server_migrations.convert_into_with_meta", + "server_migrations_find": "server_migrations.find", + "server_migrations_findall": "server_migrations.findall", + "server_migrations_get": "server_migrations.get", + "server_migrations_list": "server_migrations.list", + "server_migrations_live_migrate_force_complete": "server_migrations.live_migrate_force_complete", + "server_migrations_live_migration_abort": "server_migrations.live_migration_abort", + "servers_add_fixed_ip": "servers.add_fixed_ip", + "servers_add_floating_ip": "servers.add_floating_ip", + "servers_add_security_group": "servers.add_security_group", + "servers_backup": "servers.backup", + "servers_change_password": "servers.change_password", + "servers_clear_password": "servers.clear_password", + "servers_confirm_resize": "servers.confirm_resize", + "servers_convert_into_with_meta": "servers.convert_into_with_meta", + "servers_create": "servers.create", + "servers_create_image": "servers.create_image", + "servers_delete": "servers.delete", + "servers_delete_meta": "servers.delete_meta", + "servers_diagnostics": "servers.diagnostics", + "servers_evacuate": "servers.evacuate", + "servers_find": "servers.find", + "servers_findall": "servers.findall", + "servers_force_delete": "servers.force_delete", + "servers_get": "servers.get", + "servers_get_console_output": "servers.get_console_output", + "servers_get_mks_console": "servers.get_mks_console", + "servers_get_password": "servers.get_password", + "servers_get_rdp_console": "servers.get_rdp_console", + "servers_get_serial_console": "servers.get_serial_console", + "servers_get_spice_console": "servers.get_spice_console", + "servers_get_vnc_console": "servers.get_vnc_console", + "servers_interface_attach": "servers.interface_attach", + "servers_interface_detach": "servers.interface_detach", + "servers_interface_list": "servers.interface_list", + "servers_ips": "servers.ips", + "servers_list": "servers.list", + "servers_list_security_group": "servers.list_security_group", + "servers_live_migrate": "servers.live_migrate", + "servers_lock": "servers.lock", + "servers_migrate": "servers.migrate", + "servers_pause": "servers.pause", + "servers_reboot": "servers.reboot", + "servers_rebuild": "servers.rebuild", + "servers_remove_fixed_ip": "servers.remove_fixed_ip", + "servers_remove_floating_ip": "servers.remove_floating_ip", + "servers_remove_security_group": 
"servers.remove_security_group", + "servers_rescue": "servers.rescue", + "servers_reset_network": "servers.reset_network", + "servers_reset_state": "servers.reset_state", + "servers_resize": "servers.resize", + "servers_restore": "servers.restore", + "servers_resume": "servers.resume", + "servers_revert_resize": "servers.revert_resize", + "servers_set_meta": "servers.set_meta", + "servers_set_meta_item": "servers.set_meta_item", + "servers_shelve": "servers.shelve", + "servers_shelve_offload": "servers.shelve_offload", + "servers_start": "servers.start", + "servers_stop": "servers.stop", + "servers_suspend": "servers.suspend", + "servers_trigger_crash_dump": "servers.trigger_crash_dump", + "servers_unlock": "servers.unlock", + "servers_unpause": "servers.unpause", + "servers_unrescue": "servers.unrescue", + "servers_unshelve": "servers.unshelve", + "servers_update": "servers.update", + "services_convert_into_with_meta": "services.convert_into_with_meta", + "services_delete": "services.delete", + "services_disable": "services.disable", + "services_disable_log_reason": "services.disable_log_reason", + "services_enable": "services.enable", + "services_find": "services.find", + "services_findall": "services.findall", + "services_force_down": "services.force_down", + "services_list": "services.list", + "usage_convert_into_with_meta": "usage.convert_into_with_meta", + "usage_find": "usage.find", + "usage_findall": "usage.findall", + "usage_get": "usage.get", + "usage_list": "usage.list", + "versions_convert_into_with_meta": "versions.convert_into_with_meta", + "versions_find": "versions.find", + "versions_findall": "versions.findall", + "versions_get_current": "versions.get_current", + "versions_list": "versions.list", + "virtual_interfaces_convert_into_with_meta": "virtual_interfaces.convert_into_with_meta", + "virtual_interfaces_find": "virtual_interfaces.find", + "virtual_interfaces_findall": "virtual_interfaces.findall", + "virtual_interfaces_list": "virtual_interfaces.list", + "volumes_convert_into_with_meta": "volumes.convert_into_with_meta", + "volumes_create_server_volume": "volumes.create_server_volume", + "volumes_delete_server_volume": "volumes.delete_server_volume", + "volumes_get_server_volume": "volumes.get_server_volume", + "volumes_get_server_volumes": "volumes.get_server_volumes", + "volumes_update_server_volume": "volumes.update_server_volume" + }, + "glance": { + "_comment": "It uses glanceclient.v2.", + "image_members_create": "image_members.create", + "image_members_delete": "image_members.delete", + "image_members_list": "image_members.list", + "image_members_update": "image_members.update", + "image_tags_delete": "image_tags.delete", + "image_tags_update": "image_tags.update", + "images_add_location": "images.add_location", + "images_create": "images.create", + "images_data": "images.data", + "images_deactivate": "images.deactivate", + "images_delete": "images.delete", + "images_delete_locations": "images.delete_locations", + "images_get": "images.get", + "images_list": "images.list", + "images_reactivate": "images.reactivate", + "images_update": "images.update", + "images_update_location": "images.update_location", + "images_upload": "images.upload", + "schemas_get": "schemas.get", + "tasks_create": "tasks.create", + "tasks_get": "tasks.get", + "tasks_list": "tasks.list", + "metadefs_resource_type_associate": "metadefs_resource_type.associate", + "metadefs_resource_type_deassociate": "metadefs_resource_type.deassociate", + "metadefs_resource_type_get": 
"metadefs_resource_type.get", + "metadefs_resource_type_list": "metadefs_resource_type.list", + "metadefs_property_create": "metadefs_property.create", + "metadefs_property_delete": "metadefs_property.delete", + "metadefs_property_delete_all": "metadefs_property.delete_all", + "metadefs_property_get": "metadefs_property.get", + "metadefs_property_list": "metadefs_property.list", + "metadefs_property_update": "metadefs_property.update", + "metadefs_object_create": "metadefs_object.create", + "metadefs_object_delete": "metadefs_object.delete", + "metadefs_object_delete_all": "metadefs_object.delete_all", + "metadefs_object_get": "metadefs_object.get", + "metadefs_object_list": "metadefs_object.list", + "metadefs_object_update": "metadefs_object.update", + "metadefs_tag_create": "metadefs_tag.create", + "metadefs_tag_create_multiple": "metadefs_tag.create_multiple", + "metadefs_tag_delete": "metadefs_tag.delete", + "metadefs_tag_delete_all": "metadefs_tag.delete_all", + "metadefs_tag_get": "metadefs_tag.get", + "metadefs_tag_list": "metadefs_tag.list", + "metadefs_tag_update": "metadefs_tag.update", + "metadefs_namespace_create": "metadefs_namespace.create", + "metadefs_namespace_delete": "metadefs_namespace.delete", + "metadefs_namespace_get": "metadefs_namespace.get", + "metadefs_namespace_list": "metadefs_namespace.list", + "metadefs_namespace_update": "metadefs_namespace.update", + "versions_list": "versions.list" + }, + "keystone": { + "_comment": "It uses keystoneclient.v3.", + "credentials_create": "credentials.create", + "credentials_delete": "credentials.delete", + "credentials_find": "credentials.find", + "credentials_get": "credentials.get", + "credentials_list": "credentials.list", + "credentials_update": "credentials.update", + "domains_create": "domains.create", + "domains_delete": "domains.delete", + "domains_find": "domains.find", + "domains_get": "domains.get", + "domains_list": "domains.list", + "domains_update": "domains.update", + "endpoint_filter_add_endpoint_to_project": "endpoint_filter.add_endpoint_to_project", + "endpoint_filter_check_endpoint_in_project": "endpoint_filter.check_endpoint_in_project", + "endpoint_filter_delete_endpoint_from_project": "endpoint_filter.delete_endpoint_from_project", + "endpoint_filter_list_endpoints_for_project": "endpoint_filter.list_endpoints_for_project", + "endpoint_filter_list_projects_for_endpoint": "endpoint_filter.list_projects_for_endpoint", + "endpoint_policy_check_policy_association_for_endpoint": "endpoint_policy.check_policy_association_for_endpoint", + "endpoint_policy_check_policy_association_for_region_and_service": "endpoint_policy.check_policy_association_for_region_and_service", + "endpoint_policy_check_policy_association_for_service": "endpoint_policy.check_policy_association_for_service", + "endpoint_policy_create_policy_association_for_endpoint": "endpoint_policy.create_policy_association_for_endpoint", + "endpoint_policy_create_policy_association_for_region_and_service": "endpoint_policy.create_policy_association_for_region_and_service", + "endpoint_policy_create_policy_association_for_service": "endpoint_policy.create_policy_association_for_service", + "endpoint_policy_delete_policy_association_for_endpoint": "endpoint_policy.delete_policy_association_for_endpoint", + "endpoint_policy_delete_policy_association_for_region_and_service": "endpoint_policy.delete_policy_association_for_region_and_service", + "endpoint_policy_delete_policy_association_for_service": 
"endpoint_policy.delete_policy_association_for_service", + "endpoint_policy_get_policy_for_endpoint": "endpoint_policy.get_policy_for_endpoint", + "endpoint_policy_list_endpoints_for_policy": "endpoint_policy.list_endpoints_for_policy", + "endpoints_create": "endpoints.create", + "endpoints_delete": "endpoints.delete", + "endpoints_find": "endpoints.find", + "endpoints_get": "endpoints.get", + "endpoints_list": "endpoints.list", + "endpoints_update": "endpoints.update", + "groups_create": "groups.create", + "groups_delete": "groups.delete", + "groups_find": "groups.find", + "groups_get": "groups.get", + "groups_list": "groups.list", + "groups_update": "groups.update", + "oauth1.consumers_build_url": "oauth1.consumers.build_url", + "oauth1.consumers_create": "oauth1.consumers.create", + "oauth1.consumers_delete": "oauth1.consumers.delete", + "oauth1.consumers_find": "oauth1.consumers.find", + "oauth1.consumers_get": "oauth1.consumers.get", + "oauth1.consumers_list": "oauth1.consumers.list", + "oauth1.consumers_put": "oauth1.consumers.put", + "oauth1.consumers_update": "oauth1.consumers.update", + "oauth1.request_tokens_authorize": "oauth1.request_tokens.authorize", + "oauth1.request_tokens_build_url": "oauth1.request_tokens.build_url", + "oauth1.request_tokens_create": "oauth1.request_tokens.create", + "oauth1.request_tokens_delete": "oauth1.request_tokens.delete", + "oauth1.request_tokens_find": "oauth1.request_tokens.find", + "oauth1.request_tokens_get": "oauth1.request_tokens.get", + "oauth1.request_tokens_list": "oauth1.request_tokens.list", + "oauth1.request_tokens_put": "oauth1.request_tokens.put", + "oauth1.request_tokens_update": "oauth1.request_tokens.update", + "oauth1.access_tokens_build_url": "oauth1.access_tokens.build_url", + "oauth1.access_tokens_create": "oauth1.access_tokens.create", + "oauth1.access_tokens_delete": "oauth1.access_tokens.delete", + "oauth1.access_tokens_find": "oauth1.access_tokens.find", + "oauth1.access_tokens_get": "oauth1.access_tokens.get", + "oauth1.access_tokens_list": "oauth1.access_tokens.list", + "oauth1.access_tokens_put": "oauth1.access_tokens.put", + "oauth1.access_tokens_update": "oauth1.access_tokens.update", + "policies_create": "policies.create", + "policies_delete": "policies.delete", + "policies_find": "policies.find", + "policies_get": "policies.get", + "policies_list": "policies.list", + "policies_update": "policies.update", + "projects_create": "projects.create", + "projects_delete": "projects.delete", + "projects_find": "projects.find", + "projects_get": "projects.get", + "projects_list": "projects.list", + "projects_update": "projects.update", + "regions_create": "regions.create", + "regions_delete": "regions.delete", + "regions_find": "regions.find", + "regions_get": "regions.get", + "regions_list": "regions.list", + "regions_update": "regions.update", + "role_assignments_create": "role_assignments.create", + "role_assignments_delete": "role_assignments.delete", + "role_assignments_find": "role_assignments.find", + "role_assignments_get": "role_assignments.get", + "role_assignments_list": "role_assignments.list", + "role_assignments_update": "role_assignments.update", + "roles_check": "roles.check", + "roles_create": "roles.create", + "roles_delete": "roles.delete", + "roles_find": "roles.find", + "roles_get": "roles.get", + "roles_grant": "roles.grant", + "roles_list": "roles.list", + "roles_revoke": "roles.revoke", + "roles_update": "roles.update", + "service_catalog_catalog": "service_catalog.catalog", + 
"service_catalog_factory": "service_catalog.factory", + "service_catalog_get_data": "service_catalog.get_data", + "service_catalog_get_endpoints": "service_catalog.get_endpoints", + "service_catalog_get_token": "service_catalog.get_token", + "service_catalog_get_urls": "service_catalog.get_urls", + "service_catalog_is_valid": "service_catalog.is_valid", + "service_catalog_url_for": "service_catalog.url_for", + "services_create": "services.create", + "services_delete": "services.delete", + "services_find": "services.find", + "services_get": "services.get", + "services_list": "services.list", + "services_update": "services.update", + "trusts_create": "trusts.create", + "trusts_delete": "trusts.delete", + "trusts_find": "trusts.find", + "trusts_get": "trusts.get", + "trusts_list": "trusts.list", + "trusts_update": "trusts.update", + "users_add_to_group": "users.add_to_group", + "users_check_in_group": "users.check_in_group", + "users_create": "users.create", + "users_delete": "users.delete", + "users_find": "users.find", + "users_get": "users.get", + "users_list": "users.list", + "users_remove_from_group": "users.remove_from_group", + "users_update": "users.update", + "users_update_password": "users.update_password" + }, + "heat": { + "_comment": "It uses heatclient.v1.", + "actions_cancel_update": "actions.cancel_update", + "actions_check": "actions.check", + "actions_resume": "actions.resume", + "actions_suspend": "actions.suspend", + "build_info_build_info": "build_info.build_info", + "events_get": "events.get", + "events_list": "events.list", + "resource_types_generate_template": "resource_types.generate_template", + "resource_types_get": "resource_types.get", + "resource_types_list": "resource_types.list", + "resources_generate_template": "resources.generate_template", + "resources_get": "resources.get", + "resources_list": "resources.list", + "resources_metadata": "resources.metadata", + "resources_signal": "resources.signal", + "services_list": "services.list", + "software_configs_create": "software_configs.create", + "software_configs_delete": "software_configs.delete", + "software_configs_get": "software_configs.get", + "software_configs_list": "software_configs.list", + "software_deployments_create": "software_deployments.create", + "software_deployments_delete": "software_deployments.delete", + "software_deployments_get": "software_deployments.get", + "software_deployments_list": "software_deployments.list", + "software_deployments_metadata": "software_deployments.metadata", + "software_deployments_update": "software_deployments.update", + "stacks_abandon": "stacks.abandon", + "stacks_create": "stacks.create", + "stacks_delete": "stacks.delete", + "stacks_get": "stacks.get", + "stacks_list": "stacks.list", + "stacks_output_list": "stacks.output_list", + "stacks_output_show": "stacks.output_show", + "stacks_preview": "stacks.preview", + "stacks_preview_update": "stacks.preview_update", + "stacks_restore": "stacks.restore", + "stacks_snapshot": "stacks.snapshot", + "stacks_snapshot_delete": "stacks.snapshot_delete", + "stacks_snapshot_list": "stacks.snapshot_list", + "stacks_snapshot_show": "stacks.snapshot_show", + "stacks_template": "stacks.template", + "stacks_update": "stacks.update", + "stacks_validate": "stacks.validate", + "template_versions_get": "template_versions.get", + "template_versions_list": "template_versions.list" + }, + "ceilometer": { + "_comment": "It uses ceilometerclient.v2.", + "alarms_create": "alarms.create", + "alarms_delete": "alarms.delete", + 
"alarms_get": "alarms.get", + "alarms_get_history": "alarms.get_history", + "alarms_get_state": "alarms.get_state", + "alarms_list": "alarms.list", + "alarms_set_state": "alarms.set_state", + "alarms_update": "alarms.update", + "capabilities_get": "capabilities.get", + "event_types_list": "event_types.list", + "events_get": "events.get", + "events_list": "events.list", + "meters_list": "meters.list", + "new_samples_get": "new_samples.get", + "new_samples_list": "new_samples.list", + "query_alarm_history_query": "query_alarm_history.query", + "query_alarms_query": "query_alarms.query", + "query_samples_query": "query_samples.query", + "resources_get": "resources.get", + "resources_list": "resources.list", + "samples_create": "samples.create", + "samples_create_list": "samples.create_list", + "samples_list": "samples.list", + "statistics_list": "statistics.list", + "trait_descriptions_list": "trait_descriptions.list", + "traits_list": "traits.list" + }, + "neutron": { + "_comment": "It uses neutronclient.v2_0.", + "add_gateway_router": "add_gateway_router", + "add_interface_router": "add_interface_router", + "add_network_to_dhcp_agent": "add_network_to_dhcp_agent", + "add_router_to_l3_agent": "add_router_to_l3_agent", + "associate_health_monitor": "associate_health_monitor", + "connect_network_gateway": "connect_network_gateway", + "create_credential": "create_credential", + "create_ext": "create_ext", + "create_firewall": "create_firewall", + "create_firewall_policy": "create_firewall_policy", + "create_firewall_rule": "create_firewall_rule", + "create_floatingip": "create_floatingip", + "create_gateway_device": "create_gateway_device", + "create_health_monitor": "create_health_monitor", + "create_ikepolicy": "create_ikepolicy", + "create_ipsec_site_connection": "create_ipsec_site_connection", + "create_ipsecpolicy": "create_ipsecpolicy", + "create_lbaas_healthmonitor": "create_lbaas_healthmonitor", + "create_lbaas_member": "create_lbaas_member", + "create_lbaas_pool": "create_lbaas_pool", + "create_listener": "create_listener", + "create_loadbalancer": "create_loadbalancer", + "create_member": "create_member", + "create_metering_label": "create_metering_label", + "create_metering_label_rule": "create_metering_label_rule", + "create_net_partition": "create_net_partition", + "create_network": "create_network", + "create_network_gateway": "create_network_gateway", + "create_network_profile": "create_network_profile", + "create_packet_filter": "create_packet_filter", + "create_pool": "create_pool", + "create_port": "create_port", + "create_qos_queue": "create_qos_queue", + "create_router": "create_router", + "create_security_group": "create_security_group", + "create_security_group_rule": "create_security_group_rule", + "create_subnet": "create_subnet", + "create_subnetpool": "create_subnetpool", + "create_vip": "create_vip", + "create_vpnservice": "create_vpnservice", + "delete_agent": "delete_agent", + "delete_credential": "delete_credential", + "delete_ext": "delete_ext", + "delete_firewall": "delete_firewall", + "delete_firewall_policy": "delete_firewall_policy", + "delete_firewall_rule": "delete_firewall_rule", + "delete_floatingip": "delete_floatingip", + "delete_gateway_device": "delete_gateway_device", + "delete_health_monitor": "delete_health_monitor", + "delete_ikepolicy": "delete_ikepolicy", + "delete_ipsec_site_connection": "delete_ipsec_site_connection", + "delete_ipsecpolicy": "delete_ipsecpolicy", + "delete_lbaas_healthmonitor": "delete_lbaas_healthmonitor", + 
"delete_lbaas_member": "delete_lbaas_member", + "delete_lbaas_pool": "delete_lbaas_pool", + "delete_listener": "delete_listener", + "delete_loadbalancer": "delete_loadbalancer", + "delete_member": "delete_member", + "delete_metering_label": "delete_metering_label", + "delete_metering_label_rule": "delete_metering_label_rule", + "delete_net_partition": "delete_net_partition", + "delete_network": "delete_network", + "delete_network_gateway": "delete_network_gateway", + "delete_network_profile": "delete_network_profile", + "delete_packet_filter": "delete_packet_filter", + "delete_pool": "delete_pool", + "delete_port": "delete_port", + "delete_qos_queue": "delete_qos_queue", + "delete_quota": "delete_quota", + "delete_router": "delete_router", + "delete_security_group": "delete_security_group", + "delete_security_group_rule": "delete_security_group_rule", + "delete_subnet": "delete_subnet", + "delete_subnetpool": "delete_subnetpool", + "delete_vip": "delete_vip", + "delete_vpnservice": "delete_vpnservice", + "disassociate_health_monitor": "disassociate_health_monitor", + "disconnect_network_gateway": "disconnect_network_gateway", + "extend_create": "extend_create", + "extend_delete": "extend_delete", + "extend_list": "extend_list", + "extend_show": "extend_show", + "extend_update": "extend_update", + "firewall_policy_insert_rule": "firewall_policy_insert_rule", + "firewall_policy_remove_rule": "firewall_policy_remove_rule", + "get_lbaas_agent_hosting_loadbalancer": "get_lbaas_agent_hosting_loadbalancer", + "get_lbaas_agent_hosting_pool": "get_lbaas_agent_hosting_pool", + "get_quotas_tenant": "get_quotas_tenant", + "list_agents": "list_agents", + "list_credentials": "list_credentials", + "list_dhcp_agent_hosting_networks": "list_dhcp_agent_hosting_networks", + "list_ext": "list_ext", + "list_extensions": "list_extensions", + "list_firewall_policies": "list_firewall_policies", + "list_firewall_rules": "list_firewall_rules", + "list_firewalls": "list_firewalls", + "list_floatingips": "list_floatingips", + "list_gateway_devices": "list_gateway_devices", + "list_health_monitors": "list_health_monitors", + "list_ikepolicies": "list_ikepolicies", + "list_ipsec_site_connections": "list_ipsec_site_connections", + "list_ipsecpolicies": "list_ipsecpolicies", + "list_l3_agent_hosting_routers": "list_l3_agent_hosting_routers", + "list_lbaas_healthmonitors": "list_lbaas_healthmonitors", + "list_lbaas_loadbalancers": "list_lbaas_loadbalancers", + "list_lbaas_members": "list_lbaas_members", + "list_lbaas_pools": "list_lbaas_pools", + "list_listeners": "list_listeners", + "list_loadbalancers": "list_loadbalancers", + "list_loadbalancers_on_lbaas_agent": "list_loadbalancers_on_lbaas_agent", + "list_members": "list_members", + "list_metering_label_rules": "list_metering_label_rules", + "list_metering_labels": "list_metering_labels", + "list_net_partitions": "list_net_partitions", + "list_network_gateways": "list_network_gateways", + "list_network_profile_bindings": "list_network_profile_bindings", + "list_network_profiles": "list_network_profiles", + "list_networks": "list_networks", + "list_networks_on_dhcp_agent": "list_networks_on_dhcp_agent", + "list_packet_filters": "list_packet_filters", + "list_policy_profile_bindings": "list_policy_profile_bindings", + "list_policy_profiles": "list_policy_profiles", + "list_pools": "list_pools", + "list_pools_on_lbaas_agent": "list_pools_on_lbaas_agent", + "list_ports": "list_ports", + "list_qos_queues": "list_qos_queues", + "list_quotas": "list_quotas", + 
"list_routers": "list_routers", + "list_routers_on_l3_agent": "list_routers_on_l3_agent", + "list_security_group_rules": "list_security_group_rules", + "list_security_groups": "list_security_groups", + "list_service_providers": "list_service_providers", + "list_subnetpools": "list_subnetpools", + "list_subnets": "list_subnets", + "list_vips": "list_vips", + "list_vpnservices": "list_vpnservices", + "remove_gateway_router": "remove_gateway_router", + "remove_interface_router": "remove_interface_router", + "remove_network_from_dhcp_agent": "remove_network_from_dhcp_agent", + "remove_router_from_l3_agent": "remove_router_from_l3_agent", + "retrieve_pool_stats": "retrieve_pool_stats", + "show_agent": "show_agent", + "show_credential": "show_credential", + "show_ext": "show_ext", + "show_extension": "show_extension", + "show_firewall": "show_firewall", + "show_firewall_policy": "show_firewall_policy", + "show_firewall_rule": "show_firewall_rule", + "show_floatingip": "show_floatingip", + "show_gateway_device": "show_gateway_device", + "show_health_monitor": "show_health_monitor", + "show_ikepolicy": "show_ikepolicy", + "show_ipsec_site_connection": "show_ipsec_site_connection", + "show_ipsecpolicy": "show_ipsecpolicy", + "show_lbaas_healthmonitor": "show_lbaas_healthmonitor", + "show_lbaas_member": "show_lbaas_member", + "show_lbaas_pool": "show_lbaas_pool", + "show_listener": "show_listener", + "show_loadbalancer": "show_loadbalancer", + "show_member": "show_member", + "show_metering_label": "show_metering_label", + "show_metering_label_rule": "show_metering_label_rule", + "show_net_partition": "show_net_partition", + "show_network": "show_network", + "show_network_gateway": "show_network_gateway", + "show_network_profile": "show_network_profile", + "show_packet_filter": "show_packet_filter", + "show_policy_profile": "show_policy_profile", + "show_pool": "show_pool", + "show_port": "show_port", + "show_qos_queue": "show_qos_queue", + "show_quota": "show_quota", + "show_router": "show_router", + "show_security_group": "show_security_group", + "show_security_group_rule": "show_security_group_rule", + "show_subnet": "show_subnet", + "show_subnetpool": "show_subnetpool", + "show_vip": "show_vip", + "show_vpnservice": "show_vpnservice", + "update_agent": "update_agent", + "update_credential": "update_credential", + "update_ext": "update_ext", + "update_firewall": "update_firewall", + "update_firewall_policy": "update_firewall_policy", + "update_firewall_rule": "update_firewall_rule", + "update_floatingip": "update_floatingip", + "update_gateway_device": "update_gateway_device", + "update_health_monitor": "update_health_monitor", + "update_ikepolicy": "update_ikepolicy", + "update_ipsec_site_connection": "update_ipsec_site_connection", + "update_ipsecpolicy": "update_ipsecpolicy", + "update_lbaas_healthmonitor": "update_lbaas_healthmonitor", + "update_lbaas_member": "update_lbaas_member", + "update_lbaas_pool": "update_lbaas_pool", + "update_listener": "update_listener", + "update_loadbalancer": "update_loadbalancer", + "update_member": "update_member", + "update_network": "update_network", + "update_network_gateway": "update_network_gateway", + "update_network_profile": "update_network_profile", + "update_packet_filter": "update_packet_filter", + "update_policy_profile": "update_policy_profile", + "update_pool": "update_pool", + "update_port": "update_port", + "update_quota": "update_quota", + "update_router": "update_router", + "update_security_group": "update_security_group", + "update_subnet": 
"update_subnet", + "update_subnetpool": "update_subnetpool", + "update_vip": "update_vip", + "update_vpnservice": "update_vpnservice" + }, + "cinder": { + "_comment": "It uses cinderclient.v2.", + "availability_zones_find": "availability_zones.find", + "availability_zones_findall": "availability_zones.findall", + "availability_zones_list": "availability_zones.list", + "backups_create": "backups.create", + "backups_delete": "backups.delete", + "backups_export_record": "backups.export_record", + "backups_find": "backups.find", + "backups_findall": "backups.findall", + "backups_get": "backups.get", + "backups_import_record": "backups.import_record", + "backups_list": "backups.list", + "backups_reset_state": "backups.reset_state", + "capabilities_get": "capabilities.get", + "cgsnapshots_create": "cgsnapshots.create", + "cgsnapshots_delete": "cgsnapshots.delete", + "cgsnapshots_find": "cgsnapshots.find", + "cgsnapshots_findall": "cgsnapshots.findall", + "cgsnapshots_get": "cgsnapshots.get", + "cgsnapshots_list": "cgsnapshots.list", + "cgsnapshots_update": "cgsnapshots.update", + "consistencygroups_create": "consistencygroups.create", + "consistencygroups_create_from_src": "consistencygroups.create_from_src", + "consistencygroups_delete": "consistencygroups.delete", + "consistencygroups_find": "consistencygroups.find", + "consistencygroups_findall": "consistencygroups.findall", + "consistencygroups_get": "consistencygroups.get", + "consistencygroups_list": "consistencygroups.list", + "consistencygroups_update": "consistencygroups.update", + "limits_get": "limits.get", + "pools_list": "pools.list", + "qos_specs_associate": "qos_specs.associate", + "qos_specs_create": "qos_specs.create", + "qos_specs_delete": "qos_specs.delete", + "qos_specs_disassociate": "qos_specs.disassociate", + "qos_specs_disassociate_all": "qos_specs.disassociate_all", + "qos_specs_find": "qos_specs.find", + "qos_specs_findall": "qos_specs.findall", + "qos_specs_get": "qos_specs.get", + "qos_specs_get_associations": "qos_specs.get_associations", + "qos_specs_list": "qos_specs.list", + "qos_specs_set_keys": "qos_specs.set_keys", + "qos_specs_unset_keys": "qos_specs.unset_keys", + "quota_classes_get": "quota_classes.get", + "quota_classes_update": "quota_classes.update", + "quotas_defaults": "quotas.defaults", + "quotas_delete": "quotas.delete", + "quotas_get": "quotas.get", + "quotas_update": "quotas.update", + "restores_restore": "restores.restore", + "services_disable": "services.disable", + "services_disable_log_reason": "services.disable_log_reason", + "services_enable": "services.enable", + "services_find": "services.find", + "services_findall": "services.findall", + "services_list": "services.list", + "transfers_accept": "transfers.accept", + "transfers_create": "transfers.create", + "transfers_delete": "transfers.delete", + "transfers_find": "transfers.find", + "transfers_findall": "transfers.findall", + "transfers_get": "transfers.get", + "transfers_list": "transfers.list", + "volume_encryption_types_create": "volume_encryption_types.create", + "volume_encryption_types_delete": "volume_encryption_types.delete", + "volume_encryption_types_find": "volume_encryption_types.find", + "volume_encryption_types_findall": "volume_encryption_types.findall", + "volume_encryption_types_get": "volume_encryption_types.get", + "volume_encryption_types_list": "volume_encryption_types.list", + "volume_encryption_types_update": "volume_encryption_types.update", + "volume_snapshots_create": "volume_snapshots.create", + 
"volume_snapshots_delete": "volume_snapshots.delete", + "volume_snapshots_delete_metadata": "volume_snapshots.delete_metadata", + "volume_snapshots_find": "volume_snapshots.find", + "volume_snapshots_findall": "volume_snapshots.findall", + "volume_snapshots_get": "volume_snapshots.get", + "volume_snapshots_list": "volume_snapshots.list", + "volume_snapshots_reset_state": "volume_snapshots.reset_state", + "volume_snapshots_set_metadata": "volume_snapshots.set_metadata", + "volume_snapshots_update": "volume_snapshots.update", + "volume_snapshots_update_all_metadata": "volume_snapshots.update_all_metadata", + "volume_snapshots_update_snapshot_status": "volume_snapshots.update_snapshot_status", + "volume_type_access_add_project_access": "volume_type_access.add_project_access", + "volume_type_access_find": "volume_type_access.find", + "volume_type_access_findall": "volume_type_access.findall", + "volume_type_access_list": "volume_type_access.list", + "volume_type_access_remove_project_access": "volume_type_access.remove_project_access", + "volume_types_create": "volume_types.create", + "volume_types_default": "volume_types.default", + "volume_types_delete": "volume_types.delete", + "volume_types_find": "volume_types.find", + "volume_types_findall": "volume_types.findall", + "volume_types_get": "volume_types.get", + "volume_types_list": "volume_types.list", + "volume_types_update": "volume_types.update", + "volumes_attach": "volumes.attach", + "volumes_begin_detaching": "volumes.begin_detaching", + "volumes_create": "volumes.create", + "volumes_delete": "volumes.delete", + "volumes_delete_image_metadata": "volumes.delete_image_metadata", + "volumes_delete_metadata": "volumes.delete_metadata", + "volumes_detach": "volumes.detach", + "volumes_extend": "volumes.extend", + "volumes_find": "volumes.find", + "volumes_findall": "volumes.findall", + "volumes_force_delete": "volumes.force_delete", + "volumes_get": "volumes.get", + "volumes_get_encryption_metadata": "volumes.get_encryption_metadata", + "volumes_get_pools": "volumes.get_pools", + "volumes_initialize_connection": "volumes.initialize_connection", + "volumes_list": "volumes.list", + "volumes_manage": "volumes.manage", + "volumes_migrate_volume": "volumes.migrate_volume", + "volumes_migrate_volume_completion": "volumes.migrate_volume_completion", + "volumes_promote": "volumes.promote", + "volumes_reenable": "volumes.reenable", + "volumes_replication_disable": "volumes.replication_disable", + "volumes_replication_enable": "volumes.replication_enable", + "volumes_replication_failover": "volumes.replication_failover", + "volumes_replication_list_targets": "volumes.replication_list_targets", + "volumes_reserve": "volumes.reserve", + "volumes_reset_state": "volumes.reset_state", + "volumes_retype": "volumes.retype", + "volumes_roll_detaching": "volumes.roll_detaching", + "volumes_set_bootable": "volumes.set_bootable", + "volumes_set_image_metadata": "volumes.set_image_metadata", + "volumes_set_metadata": "volumes.set_metadata", + "volumes_show_image_metadata": "volumes.show_image_metadata", + "volumes_terminate_connection": "volumes.terminate_connection", + "volumes_unmanage": "volumes.unmanage", + "volumes_unreserve": "volumes.unreserve", + "volumes_update": "volumes.update", + "volumes_update_all_metadata": "volumes.update_all_metadata", + "volumes_update_readonly_flag": "volumes.update_readonly_flag", + "volumes_upload_to_image": "volumes.upload_to_image" + }, + "trove": { + "_comment": "It uses troveclient.v1.", + "backups_create": 
"backups.create", + "backups_delete": "backups.delete", + "backups_find": "backups.find", + "backups_findall": "backups.findall", + "backups_get": "backups.get", + "backups_list": "backups.list", + "clusters_add_shard": "clusters.add_shard", + "clusters_create": "clusters.create", + "clusters_delete": "clusters.delete", + "clusters_find": "clusters.find", + "clusters_findall": "clusters.findall", + "clusters_get": "clusters.get", + "clusters_grow": "clusters.grow", + "clusters_list": "clusters.list", + "clusters_shrink": "clusters.shrink", + "configuration_parameters_find": "configuration_parameters.find", + "configuration_parameters_findall": "configuration_parameters.findall", + "configuration_parameters_get_parameter": "configuration_parameters.get_parameter", + "configuration_parameters_get_parameter_by_version": "configuration_parameters.get_parameter_by_version", + "configuration_parameters_list": "configuration_parameters.list", + "configuration_parameters_parameters": "configuration_parameters.parameters", + "configuration_parameters_parameters_by_version": "configuration_parameters.parameters_by_version", + "configurations_create": "configurations.create", + "configurations_delete": "configurations.delete", + "configurations_edit": "configurations.edit", + "configurations_find": "configurations.find", + "configurations_findall": "configurations.findall", + "configurations_get": "configurations.get", + "configurations_instances": "configurations.instances", + "configurations_list": "configurations.list", + "configurations_update": "configurations.update", + "databases_create": "databases.create", + "databases_delete": "databases.delete", + "databases_find": "databases.find", + "databases_findall": "databases.findall", + "databases_list": "databases.list", + "datastore_versions_find": "datastore_versions.find", + "datastore_versions_findall": "datastore_versions.findall", + "datastore_versions_get": "datastore_versions.get", + "datastore_versions_get_by_uuid": "datastore_versions.get_by_uuid", + "datastore_versions_list": "datastore_versions.list", + "datastore_versions_update": "datastore_versions.update", + "datastores_find": "datastores.find", + "datastores_findall": "datastores.findall", + "datastores_get": "datastores.get", + "datastores_list": "datastores.list", + "flavors_find": "flavors.find", + "flavors_findall": "flavors.findall", + "flavors_get": "flavors.get", + "flavors_list": "flavors.list", + "flavors_list_datastore_version_associated_flavors": "flavors.list_datastore_version_associated_flavors", + "instances_backups": "instances.backups", + "instances_configuration": "instances.configuration", + "instances_create": "instances.create", + "instances_delete": "instances.delete", + "instances_edit": "instances.edit", + "instances_eject_replica_source": "instances.eject_replica_source", + "instances_find": "instances.find", + "instances_findall": "instances.findall", + "instances_get": "instances.get", + "instances_list": "instances.list", + "instances_modify": "instances.modify", + "instances_promote_to_replica_source": "instances.promote_to_replica_source", + "instances_resize_instance": "instances.resize_instance", + "instances_resize_volume": "instances.resize_volume", + "instances_restart": "instances.restart", + "limits_find": "limits.find", + "limits_findall": "limits.findall", + "limits_list": "limits.list", + "metadata_create": "metadata.create", + "metadata_delete": "metadata.delete", + "metadata_edit": "metadata.edit", + "metadata_list": "metadata.list", + 
"metadata_show": "metadata.show", + "metadata_update": "metadata.update", + "root_create": "root.create", + "root_create_cluster_root": "root.create_cluster_root", + "root_create_instance_root": "root.create_instance_root", + "root_delete": "root.delete", + "root_disable_instance_root": "root.disable_instance_root", + "root_find": "root.find", + "root_findall": "root.findall", + "root_is_cluster_root_enabled": "root.is_cluster_root_enabled", + "root_is_instance_root_enabled": "root.is_instance_root_enabled", + "root_is_root_enabled": "root.is_root_enabled", + "root_list": "root.list", + "security_group_rules_create": "security_group_rules.create", + "security_group_rules_delete": "security_group_rules.delete", + "security_group_rules_find": "security_group_rules.find", + "security_group_rules_findall": "security_group_rules.findall", + "security_group_rules_list": "security_group_rules.list", + "security_groups_find": "security_groups.find", + "security_groups_findall": "security_groups.findall", + "security_groups_get": "security_groups.get", + "security_groups_list": "security_groups.list", + "users_change_passwords": "users.change_passwords", + "users_create": "users.create", + "users_delete": "users.delete", + "users_find": "users.find", + "users_findall": "users.findall", + "users_get": "users.get", + "users_grant": "users.grant", + "users_list": "users.list", + "users_list_access": "users.list_access", + "users_revoke": "users.revoke", + "users_update_attributes": "users.update_attributes" + }, + "ironic": { + "_comment": "It uses ironicclient.v1.", + "chassis_create": "chassis.create", + "chassis_delete": "chassis.delete", + "chassis_get": "chassis.get", + "chassis_list": "chassis.list", + "chassis_list_nodes": "chassis.list_nodes", + "chassis_update": "chassis.update", + "driver_delete": "driver.delete", + "driver_get": "driver.get", + "driver_get_vendor_passthru_methods": "driver.get_vendor_passthru_methods", + "driver_list": "driver.list", + "driver_properties": "driver.properties", + "driver_update": "driver.update", + "driver_vendor_passthru": "driver.vendor_passthru", + "node_create": "node.create", + "node_delete": "node.delete", + "node_get": "node.get", + "node_get_boot_device": "node.get_boot_device", + "node_get_by_instance_uuid": "node.get_by_instance_uuid", + "node_get_console": "node.get_console", + "node_get_supported_boot_devices": "node.get_supported_boot_devices", + "node_get_vendor_passthru_methods": "node.get_vendor_passthru_methods", + "node_list": "node.list", + "node_list_ports": "node.list_ports", + "node_set_boot_device": "node.set_boot_device", + "node_set_console_mode": "node.set_console_mode", + "node_set_maintenance": "node.set_maintenance", + "node_set_power_state": "node.set_power_state", + "node_set_provision_state": "node.set_provision_state", + "node_states": "node.states", + "node_update": "node.update", + "node_validate": "node.validate", + "node_vendor_passthru": "node.vendor_passthru", + "port_create": "port.create", + "port_delete": "port.delete", + "port_get": "port.get", + "port_get_by_address": "port.get_by_address", + "port_list": "port.list", + "port_update": "port.update" + }, + "baremetal_introspection": { + "_comment": "It uses ironic_inspector_client.v1.", + "introspect": "introspect", + "get_status": "get_status", + "get_data": "get_data", + "rules_create": "rules.create", + "rules_delete": "rules.delete", + "rules_delete_all": "rules.delete_all", + "rules_from_json": "rules.from_json", + "rules_get": "rules.get", + "rules_get_all": 
"rules.get_all", + "wait_for_finish": "wait_for_finish" + }, + "swift": { + "_comment": "It uses swiftclient.v1.", + "head_account": "head_account", + "get_account": "get_account", + "post_account": "post_account", + "head_container": "head_container", + "get_container": "get_container", + "put_container": "put_container", + "post_container": "post_container", + "delete_container": "delete_container", + "get_object": "get_object", + "put_object": "put_object", + "post_object": "post_object", + "get_capabilities": "get_capabilities" + }, + "zaqar": { + "_comment": "It uses zaqarclient.v2.", + "queue_messages": "queue_messages", + "queue_post": "queue_post", + "queue_pop": "queue_pop" + }, + "barbican": { + "_comment": "It uses barbicanclient", + "cas_get": "cas.get", + "cas_list": "cas.list", + "cas_total": "cas.total", + "containers_create": "containers.create", + "containers_create_certificate": "containers.create_certificate", + "containers_create_rsa": "containers.create_rsa", + "containers_delete": "containers.delete", + "containers_get": "containers.get", + "containers_list": "containers.list", + "containers_register_consumer": "containers.register_consumer", + "containers_remove_consumer": "containers.remove_consumer", + "containers_total": "containers.total", + "orders_create": "orders.create", + "orders_create_asymmetric": "orders.create_asymmetric", + "orders_create_certificate": "orders.create_certificate", + "orders_create_key": "orders.create_key", + "orders_delete": "orders.delete", + "orders_get": "orders.get", + "orders_list": "orders.list", + "orders_total": "orders.total", + "secrets_create": "secrets.create", + "secrets_delete": "secrets.delete", + "secrets_get": "secrets.get", + "secrets_list": "secrets.list", + "secrets_total": "secrets.total" + }, + "mistral": { + "_comment": "It uses mistralclient.v2.", + "action_executions_create": "action_executions.create", + "action_executions_delete": "action_executions.delete", + "action_executions_find": "action_executions.find", + "action_executions_get": "action_executions.get", + "action_executions_list": "action_executions.list", + "action_executions_update": "action_executions.update", + "actions_create": "actions.create", + "actions_delete": "actions.delete", + "actions_find": "actions.find", + "actions_get": "actions.get", + "actions_list": "actions.list", + "actions_update": "actions.update", + "cron_triggers_create": "cron_triggers.create", + "cron_triggers_delete": "cron_triggers.delete", + "cron_triggers_find": "cron_triggers.find", + "cron_triggers_get": "cron_triggers.get", + "cron_triggers_list": "cron_triggers.list", + "environments_create": "environments.create", + "environments_delete": "environments.delete", + "environments_find": "environments.find", + "environments_get": "environments.get", + "environments_list": "environments.list", + "environments_update": "environments.update", + "executions_create": "executions.create", + "executions_delete": "executions.delete", + "executions_find": "executions.find", + "executions_get": "executions.get", + "executions_list": "executions.list", + "executions_update": "executions.update", + "members_create": "members.create", + "members_delete": "members.delete", + "members_find": "members.find", + "members_get": "members.get", + "members_list": "members.list", + "members_update": "members.update", + "services_find": "services.find", + "services_list": "services.list", + "tasks_find": "tasks.find", + "tasks_get": "tasks.get", + "tasks_list": "tasks.list", + 
"tasks_rerun": "tasks.rerun", + "workbooks_create": "workbooks.create", + "workbooks_delete": "workbooks.delete", + "workbooks_find": "workbooks.find", + "workbooks_get": "workbooks.get", + "workbooks_list": "workbooks.list", + "workbooks_update": "workbooks.update", + "workbooks_validate": "workbooks.validate", + "workflows_create": "workflows.create", + "workflows_delete": "workflows.delete", + "workflows_find": "workflows.find", + "workflows_get": "workflows.get", + "workflows_list": "workflows.list", + "workflows_update": "workflows.update", + "workflows_validate": "workflows.validate" + }, + "designate": { + "_comment": "It uses designateclient.v1.", + "diagnostics_ping": "diagnostics.ping", + "domains_create ": "domains.create", + "domains_delete": "domains.delete", + "domains_get": "domains.get", + "domains_list": "domains.list", + "domains_list_domain_servers": "domains.list_domain_servers", + "domains_update": "domains.update", + "quotas_get": "quotas.get", + "quotas_reset": "quotas.reset", + "quotas_update": "quotas.update", + "records_create": "records.create", + "records_delete": "records.delete", + "records_get": "records.get", + "records_list": "records.list", + "records_update": "records.update", + "reports_count_all": "reports.count_all", + "reports_count_domains": "reports.count_domains", + "reports_count_records": "reports.count_records", + "reports_count_tenants": "reports.count_tenants", + "reports_tenant_domains": "reports.tenant_domains", + "reports_tenants_all": "reports.tenants_all", + "servers_create": "servers.create", + "servers_delete": "servers.delete", + "servers_get": "servers.get", + "servers_list": "servers.list", + "servers_update": "servers.update", + "sync_sync_all": "sync.sync_all", + "sync_sync_domain": "sync.sync_domain", + "sync_sync_record": "sync.sync_record", + "touch_domain": "touch.domain" + }, + "magnum": { + "_comment": "It uses magnumclient.v1.", + "baymodels_create": "baymodels.create", + "baymodels_delete": "baymodels.delete", + "baymodels_get": "baymodels.get", + "baymodels_list": "baymodels.list", + "baymodels_update": "baymodels.update", + "bays_create": "bays.create", + "bays_delete": "bays.delete", + "bays_get": "bays.get", + "bays_list": "bays.list", + "bays_update": "bays.update", + "certificates_create": "certificates.create", + "certificates_delete": "certificates.delete", + "certificates_get": "certificates.get", + "certificates_list": "certificates.list", + "certificates_update": "certificates.update", + "mservices_create": "mservices.create", + "mservices_delete": "mservices.delete", + "mservices_get": "mservices.get", + "mservices_list": "mservices.list", + "mservices_update": "mservices.update" + }, + "murano":{ + "_comment": "It uses muranoclient.v1.", + "categories_add": "categories.add", + "categories_delete": "categories.delete", + "categories_get": "categories.get", + "categories_list": "categories.list", + "deployments_list": "deployments.list", + "deployments_reports": "deployments.reports", + "env_templates_clone": "env_templates.clone", + "env_templates_create": "env_templates.create", + "env_templates_create_app": "env_templates.create_app", + "env_templates_create_env": "env_templates.create_env", + "env_templates_delete": "env_templates.delete", + "env_templates_delete_app": "env_templates.delete_app", + "env_templates_get": "env_templates.get", + "env_templates_list": "env_templates.list", + "env_templates_update": "env_templates.update", + "environments_create": "environments.create", + "environments_delete": 
"environments.delete", + "environments_find": "environments.find", + "environments_findall": "environments.findall", + "environments_get": "environments.get", + "environments_last_status": "environments.last_status", + "environments_list": "environments.list", + "environments_update": "environments.update", + "instance_statistics_get": "instance_statistics.get", + "instance_statistics_get_aggregated": "instance_statistics.get_aggregated", + "packages_categories": "packages.categories", + "packages_create": "packages.create", + "packages_delete": "packages.delete", + "packages_download": "packages.download", + "packages_filter": "packages.filter", + "packages_get": "packages.get", + "packages_get_logo": "packages.get_logo", + "packages_get_supplier_logo": "packages.get_supplier_logo", + "packages_get_ui": "packages.get_ui", + "packages_list": "packages.list", + "packages_toggle_active": "packages.toggle_active", + "packages_toggle_public": "packages.toggle_public", + "packages_update": "packages.update", + "request_statistics_list": "request_statistics.list", + "services_delete": "services.delete", + "services_get": "services.get", + "services_list": "services.list", + "services_post": "services.post", + "sessions_configure": "sessions.configure", + "sessions_delete": "sessions.delete", + "sessions_deploy": "sessions.deploy", + "sessions_get": "sessions.get" + }, + "tacker":{ + "_comment": "It uses tackerclient.v1_0.", + "list_extensions": "list_extensions", + "show_extension": "show_extension", + "create_vnfd": "create_vnfd", + "delete_vnfd": "delete_vnfd", + "list_vnfds": "list_vnfds", + "show_vnfd": "show_vnfd", + "create_vnf": "create_vnf", + "update_vnf": "update_vnf", + "delete_vnf": "delete_vnf", + "list_vnfs": "list_vnfs", + "show_vnf": "show_vnf", + "create_vim": "create_vim", + "update_vim": "update_vim", + "delete_vim": "delete_vim", + "list_vims": "list_vims", + "show_vim": "show_vim" + } +} diff --git a/src/files/resources.tar b/src/files/resources.tar new file mode 100644 index 0000000..f13432b Binary files /dev/null and b/src/files/resources.tar differ diff --git a/src/icon.svg b/src/icon.svg new file mode 100644 index 0000000..c90ecfc --- /dev/null +++ b/src/icon.svg @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/layer.yaml b/src/layer.yaml new file mode 100644 index 0000000..a71fc3a --- /dev/null +++ b/src/layer.yaml @@ -0,0 +1,5 @@ +includes: ['layer:openstack-api'] +options: + basic: + use_venv: True + include_system_packages: True \ No newline at end of file diff --git a/src/lib/charm/openstack/__init__.py b/src/lib/charm/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/lib/charm/openstack/mistral.py b/src/lib/charm/openstack/mistral.py new file mode 100644 index 0000000..4b1375f --- /dev/null +++ b/src/lib/charm/openstack/mistral.py @@ -0,0 +1,95 @@ +import collections +import socket +import subprocess + +import charmhelpers.core.hookenv as hookenv +import charms_openstack.charm +import charms_openstack.ip as os_ip + +# import charms_openstack.sdn.odl as odl +# import charms_openstack.sdn.ovs as ovs + + +class MistralCharm(charms_openstack.charm.HAOpenStackCharm): + + # Internal name of charm + service_name = name = 'mistral' + + # First release supported + release = 'mitaka' + + # List of packages to install 
for this charm + packages = ['mistral-api', 'mistral-engine', 'mistral-executor', 'python-apt'] + + api_ports = { + 'mistral-api': { + os_ip.PUBLIC: 8989, + os_ip.ADMIN: 8989, + os_ip.INTERNAL: 8989, + } + } + + service_type = 'mistral' + default_service = 'mistral-api' + services = ['haproxy', 'mistral-api', 'mistral-engine', 'mistral-executor'] + + # Note that the hsm interface is optional - defined in config.yaml + required_relations = ['shared-db', 'amqp', 'identity-service'] + + restart_map = { + '/etc/mistral/mistral.conf': services, + '/etc/mistral/policy.json': services, + '/etc/mistral/logging.conf': services, + '/etc/mistral/wf_trace_logging.conf': services} + + ha_resources = ['vips', 'haproxy'] + + release_pkg = 'mistral-common' + + package_codenames = { + 'mistral-common': collections.OrderedDict([ + ('2', 'mitaka'), + ('3', 'newton'), + ('4', 'ocata'), + ]), + } + + sync_cmd = ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head'] + + def db_sync(self): + """Perform a database sync using the command defined in the + self.sync_cmd attribute. The services defined in self.services are + restarted after the database sync. + """ + if not self.db_sync_done() and hookenv.is_leader(): + + subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']) + subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'stamp', 'head']) + subprocess.check_call(['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']) + + hookenv.leader_set({'db-sync-done': True}) + # Restart services immediately after db sync as + # render_domain_config needs a working system + self.restart_all() + + def get_amqp_credentials(self): + return ('mistral', 'mistral') + + def get_database_setup(self): + return [{ + 'database': 'mistral', + 'username': 'mistral', + 'hostname': hookenv.unit_private_ip() },] + + @property + def public_url(self): + return super().public_url + "/v2" + + @property + def admin_url(self): + return super().admin_url + "/v2" + + @property + def internal_url(self): + return super().internal_url + "/v2" + diff --git a/src/metadata.yaml b/src/metadata.yaml new file mode 100644 index 0000000..58b7c01 --- /dev/null +++ b/src/metadata.yaml @@ -0,0 +1,10 @@ +name: mistral +summary: Short summary of charm +description: | + Longline summary of charm +tags: + - openstack +series: + - xenial + - trusty + - yakkety \ No newline at end of file diff --git a/src/reactive/mistral_handlers.py b/src/reactive/mistral_handlers.py new file mode 100644 index 0000000..114ebb4 --- /dev/null +++ b/src/reactive/mistral_handlers.py @@ -0,0 +1,82 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
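The `db_sync` override in the MistralCharm class above runs three `mistral-db-manage` steps (schema upgrade, Alembic stamp, and population of the standard actions) on the leader unit only, then restarts all services so they pick up the freshly migrated database. A minimal sketch of that same pattern, assuming the `charmhelpers` and `charms_openstack` behaviour already used in mistral.py; the helper name `_db_manage` is illustrative and not part of the charm:

import subprocess

import charmhelpers.core.hookenv as hookenv

MISTRAL_CONF = '/etc/mistral/mistral.conf'


def _db_manage(*args):
    # Illustrative helper: run one mistral-db-manage step against the
    # charm-rendered configuration file.
    subprocess.check_call(
        ['mistral-db-manage', '--config-file', MISTRAL_CONF] + list(args))


def db_sync(charm):
    # Sketch of the leader-guarded migration sequence used by MistralCharm.
    if charm.db_sync_done() or not hookenv.is_leader():
        return
    _db_manage('upgrade', 'head')   # apply schema migrations
    _db_manage('stamp', 'head')     # record the current Alembic revision
    _db_manage('populate')          # load Mistral's standard actions/workflows
    hookenv.leader_set({'db-sync-done': True})
    charm.restart_all()             # services must restart after the sync

The restart at the end matters because the mistral-api, mistral-engine and mistral-executor services listed in `restart_map` are rendered and started before the database is populated, and they cache the action catalogue at startup.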
+ +import subprocess + +import charms_openstack.charm as charm +import charms.reactive as reactive + +# This charm's library contains all of the handler code associated with +# sdn_charm +import charm.openstack.mistral as mistral # noqa + +charm.use_defaults( + 'charm.installed', + 'amqp.connected', + 'shared-db.connected', + 'identity-service.connected', + 'identity-service.available', # enables SSL support + 'config.changed', + 'update-status') + + +def horrible_hack_to_workaround_missing_package_files(): + import tarfile + import shutil + resource_tar = 'files/resources.tar' + archive = tarfile.open(resource_tar) + archive.extractall('/usr/lib/python2.7/dist-packages/mistral') + shutil.copyfile( + 'files/mapping.json', + '/usr/lib/python2.7/dist-packages/mistral/actions/openstack/mapping.json') + + +@reactive.when_not('config.rendered') +def mask_svc(): + mistral_svcs = ['mistral-api', 'mistral-engine', 'mistral-executor'] + for svc in mistral_svcs: + subprocess.check_call(['systemctl', 'stop', svc]) + subprocess.check_call(['systemctl', 'mask', svc]) + + +@reactive.when('config.rendered') +def unmask_svc(): + mistral_svcs = ['mistral-api', 'mistral-engine', 'mistral-executor'] + for svc in mistral_svcs: + subprocess.check_call(['systemctl', 'unmask', svc]) + subprocess.check_call(['systemctl', 'start', svc]) + + +@reactive.when('shared-db.available') +@reactive.when('identity-service.available') +@reactive.when('amqp.available') +def render_config(*args): + """Render the configuration for charm when all the interfaces are + available. + """ + horrible_hack_to_workaround_missing_package_files() + with charm.provide_charm_instance() as charm_class: + charm_class.render_with_interfaces(args) + charm_class.db_sync() + charm_class.upgrade_if_available(args) + charm_class.assess_status() + reactive.set_state('config.rendered') + + +@reactive.when('ha.connected') +def cluster_connected(hacluster): + """Configure HA resources in corosync""" + with charm.provide_charm_instance() as charm_class: + charm_class.configure_ha_resources(hacluster) + charm_class.assess_status() diff --git a/src/templates/liberty/.keep b/src/templates/liberty/.keep new file mode 100644 index 0000000..e69de29 diff --git a/src/templates/mitaka/.keep b/src/templates/mitaka/.keep new file mode 100644 index 0000000..e69de29 diff --git a/src/templates/mitaka/logging.conf b/src/templates/mitaka/logging.conf new file mode 100644 index 0000000..58c5dea --- /dev/null +++ b/src/templates/mitaka/logging.conf @@ -0,0 +1,32 @@ +[loggers] +keys=root + +[handlers] +keys=consoleHandler, fileHandler + +[formatters] +keys=verboseFormatter, simpleFormatter + +[logger_root] +level=DEBUG +handlers=consoleHandler, fileHandler + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[handler_fileHandler] +class=FileHandler +level=INFO +formatter=verboseFormatter +args=("/var/log/mistral.log",) + +[formatter_verboseFormatter] +format=%(asctime)s %(thread)s %(levelname)s %(module)s [-] %(message)s +datefmt= + +[formatter_simpleFormatter] +format=%(asctime)s %(levelname)s [-] %(message)s +datefmt= diff --git a/src/templates/mitaka/mistral.conf b/src/templates/mitaka/mistral.conf new file mode 100644 index 0000000..307ae27 --- /dev/null +++ b/src/templates/mitaka/mistral.conf @@ -0,0 +1,1001 @@ +[DEFAULT] + +# +# From mistral.config +# + +# Enables debugger. Note that using this option changes how the +# eventlet library is used to support async IO. 
This could result in +# failures that do not occur under normal operation. Use at your own +# risk. (boolean value) +#use_debugger = false + +# Specifies which mistral server to start by the launch script. Valid +# options are all or any combination of api, engine, and executor. +# (list value) +#server = all + +# Logger name for pretty workflow trace output. (string value) +#workflow_trace_log_name = workflow_trace + +# Specifies whether Mistral uses modified oslo.messaging (if True) or +# original oslo.messaging. Modified oslo.messaging is done for +# acknowledgement a message after processing. (boolean value) +#use_mistral_rpc = false + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of +# the default INFO level. (boolean value) +debug = {{ options.verbose }} + +# If set to false, the logging level will be set to WARNING instead of +# the default INFO level. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +verbose = {{ options.debug }} + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# Note that when logging configuration files are used then all logging +# configuration is set in the configuration file and other logging +# configuration options are ignored (for example, +# logging_context_format_string). (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. +# (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default +# is set, logging will go to stderr as defined by use_stderr. This +# option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. +# This option is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is +# moved or removed this handler will open a new log file with +# specified path instantaneously. It makes sense only if log_file +# option is specified and Linux platform is used. This option is +# ignored if log_config_append is set. (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and +# will be changed later to honor RFC5424. This option is ignored if +# log_config_append is set. (boolean value) +use_syslog = {{ options.use_syslog }} + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Log output to standard error. This option is ignored if +# log_config_append is set. (boolean value) +#use_stderr = true + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. 
+# (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the +# message is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string +# value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is +# ignored if log_config_append is set. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size = 30 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve to this +# address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +# Allowed values: redis, dummy +#rpc_zmq_matchmaker = redis + +# Type of concurrency used. Either "native" or "eventlet" (string +# value) +#rpc_zmq_concurrency = eventlet + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. +# Default is unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. +# Must match "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Seconds to wait before a cast expires (TTL). The default value of -1 +# specifies an infinite linger period. The value of 0 specifies no +# linger period. Pending messages shall be discarded immediately when +# the socket is closed. Only supported by impl_zmq. (integer value) +#rpc_cast_timeout = -1 + +# The default number of seconds that poll should wait. Poll raises +# timeout exception when timeout expired. (integer value) +#rpc_poll_timeout = 1 + +# Expiration timeout in seconds of a name service record about +# existing target ( < 0 means no timeout). (integer value) +#zmq_target_expire = 120 + +# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. +# (boolean value) +#use_pub_sub = true + +# Minimal port number for random ports range. 
(port value) +# Minimum value: 0 +# Maximum value: 65535 +#rpc_zmq_min_port = 49152 + +# Maximal port number for random ports range. (integer value) +# Minimum value: 1 +# Maximum value: 65536 +#rpc_zmq_max_port = 65536 + +# Number of retries to find free port number before fail with +# ZMQBindError. (integer value) +#rpc_zmq_bind_port_retries = 100 + +# Size of executor thread pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend option +# and driver specific configuration. (string value) +#transport_url = + +# The messaging driver to use, defaults to rabbit. Other drivers +# include amqp and zmq. (string value) +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the transport_url +# option. (string value) +#control_exchange = openstack + + +[api] + +# +# From mistral.config +# + +# Mistral API server host (string value) +host = {{ options.service_listen_info.mistral_api.ip }} + +# Mistral API server port (port value) +# Minimum value: 0 +# Maximum value: 65535 +port = {{ options.service_listen_info.mistral_api.port }} + +# Enables the ability to delete action_execution which has no +# relationship with workflows. (boolean value) +#allow_action_execution_deletion = false + + +[coordination] + +# +# From mistral.config +# + +# The backend URL to be used for coordination (string value) +#backend_url = + +# Number of seconds between heartbeats for coordination. (floating +# point value) +#heartbeat_interval = 5.0 + + +[database] +connection = {{ shared_db.uri }} + +# +# From oslo.db +# + +# The file name to use with SQLite. (string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. +# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. 
(integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = 50 + +# Verbosity of SQL debugging information: 0=None, 100=Everything. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection +# lost. (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database +# operation up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries +# of a database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before +# error is raised. Set to -1 to specify an infinite retry count. +# (integer value) +#db_max_retries = 20 + + +[engine] + +# +# From mistral.config +# + +# Mistral engine plugin (string value) +#engine = default + +# Name of the engine node. This can be an opaque identifier. It is not +# necessarily a hostname, FQDN, or IP address. (string value) +#host = 0.0.0.0 + +# The message topic that the engine listens on. (string value) +#topic = mistral_engine + +# The version of the engine. (string value) +#version = 1.0 + +# The default maximum size in KB of large text fields of runtime +# execution objects. Use -1 for no limit. (integer value) +#execution_field_size_limit_kb = 1024 + + +[execution_expiration_policy] + +# +# From mistral.config +# + +# How often will the executions be evaluated (in minutes). For example +# for value 120 the interval will be 2 hours (every 2 hours). (integer +# value) +#evaluation_interval = + +# Evaluate from which time remove executions in minutes. For example +# when older_than = 60, remove all executions that finished a 60 +# minutes ago or more. Minimum value is 1. Note that only final state +# execution will remove ( SUCCESS / ERROR ). (integer value) +#older_than = + + +[executor] + +# +# From mistral.config +# + +# Name of the executor node. This can be an opaque identifier. It is +# not necessarily a hostname, FQDN, or IP address. (string value) +#host = 0.0.0.0 + +# The message topic that the executor listens on. (string value) +#topic = mistral_executor + +# The version of the executor. 
(string value) +#version = 1.0 + + +# Mistal only works with keystone v3 atm +[keystone_authtoken] +auth_uri = {{ identity_service.service_protocol }}://{{ identity_service.service_host }}:{{ identity_service.service_port }}/v3 +auth_url = {{ identity_service.auth_protocol }}://{{ identity_service.auth_host }}:{{ identity_service.auth_port }} +auth_type = password +project_domain_name = default +user_domain_name = default +project_name = services +username = {{ identity_service.service_username }} +password = {{ identity_service.service_password }} +admin_user = {{ identity_service.service_username }} +admin_password = {{ identity_service.service_password }} +admin_tenant_name = services + +# +# From keystonemiddleware.auth_token +# + +# Complete public Identity API endpoint. (string value) +#auth_uri = + +# API version of the admin Identity API endpoint. (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but +# delegate the authorization decision to downstream WSGI components. +# (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. +# (integer value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with +# Identity API Server. (integer value) +#http_request_max_retries = 3 + +# Env key for the swift cache. (string value) +#cache = + +# Required if identity server requires client certificate (string +# value) +#certfile = + +# Required if identity server requires client certificate (string +# value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs +# connections. Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# The region in which the identity server can be found. (string value) +#region_name = + +# Directory used to cache files related to PKI tokens. (string value) +#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. +# If left undefined, tokens will instead be cached in-process. (list +# value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the +# middleware caches previously-seen tokens for a configurable duration +# (in seconds). Set to -1 to disable caching completely. (integer +# value) +#token_cache_time = 300 + +# Determines the frequency at which the list of revoked tokens is +# retrieved from the Identity service (in seconds). A high number of +# revocation events combined with a low cache duration may +# significantly reduce performance. (integer value) +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be +# authenticated or authenticated and encrypted. If MAC, token data is +# authenticated (with HMAC) in the cache. If ENCRYPT, token data is +# encrypted and authenticated in the cache. If the value is not one of +# these options or empty, auth_token will raise an exception on +# initialization. (string value) +# Allowed values: None, MAC, ENCRYPT +#memcache_security_strategy = None + +# (Optional, mandatory if memcache_security_strategy is defined) This +# string is used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead +# before it is tried again. 
(integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a +# memcached server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held +# unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a +# memcached client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. +# The advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If +# False, middleware will not ask for service catalog on token +# validation and will not set the X-Service-Catalog header. (boolean +# value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: +# "disabled" to not check token binding. "permissive" (default) to +# validate binding information if the bind type is of a form known to +# the server and ignore it if not. "strict" like "permissive" but if +# the bind type is unknown the token will be rejected. "required" any +# form of token binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string value) +#enforce_token_bind = permissive + +# If true, the revocation list will be checked for cached tokens. This +# requires that PKI tokens are configured on the identity server. +# (boolean value) +#check_revocations_for_cached = false + +# Hash algorithms to use for hashing PKI tokens. This may be a single +# algorithm or multiple. The algorithms are those supported by Python +# standard hashlib.new(). The hashes will be tried in the order given, +# so put the preferred one first for performance. The result of the +# first hash will be stored in the cache. This will typically be set +# to multiple values only while migrating from a less secure algorithm +# to a more secure one. Once all the old tokens are expired this +# option should be set to a single value for better performance. (list +# value) +#hash_algorithms = md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use +# identity_uri. (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use +# identity_uri. (string value) +#auth_host = 127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port = 35357 + +# Protocol of the admin Identity API endpoint. Deprecated, use +# identity_uri. (string value) +# Allowed values: http, https +#auth_protocol = http + +# Complete admin Identity API endpoint. This should specify the +# unversioned root endpoint e.g. https://localhost:35357/ (string +# value) +#identity_uri = + +# This option is deprecated and may be removed in a future release. +# Single shared secret with the Keystone configuration used for +# bootstrapping a Keystone installation, or otherwise bypassing the +# normal authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token = + +# Service username. (string value) +#admin_user = admin + +# Service user password. 
(string value) +#admin_password = + +# Service tenant name. (string value) +#admin_tenant_name = admin + +# Authentication type to load (unknown value) +# Deprecated group/name - [DEFAULT]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (unknown +# value) +#auth_section = + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host = 127.0.0.1 + +# Use this port to connect to redis host. (port value) +# Minimum value: 0 +# Maximum value: 65535 +#port = 6379 + +# Password for Redis server (optional). (string value) +#password = + +# List of Redis Sentinel hosts (fault tolerance mode) e.g. +# [host:port, host1:port ... ] (list value) +#sentinel_hosts = + +# Redis replica set name. (string value) +#sentinel_group_name = oslo-messaging-zeromq + +# Time in ms to wait between connection attempts. (integer value) +#wait_timeout = 500 + +# Time in ms to wait before the transaction is killed. (integer value) +#check_timeout = 20000 + +# Timeout in ms on blocking socket operations (integer value) +#socket_timeout = 1000 + + +{% include "parts/section-rabbitmq-oslo" %} + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +#group_request_prefix = unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +#trace = false + +# CA certificate PEM file to verify server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string +# value) +# Deprecated group/name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string +# value) +# Deprecated group/name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +#ssl_key_password = + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +#allow_insecure_clients = false + +# Space separated list of acceptable SASL mechanisms (string value) +# Deprecated group/name - [amqp1]/sasl_mechanisms +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string +# value) +# Deprecated group/name - [amqp1]/sasl_config_dir +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +# Deprecated group/name - [amqp1]/sasl_config_name +#sasl_config_name = + +# User name for message broker authentication (string value) +# Deprecated group/name - [amqp1]/username +#username = + +# Password for message broker authentication (string value) +# Deprecated group/name - [amqp1]/password +#password = + + +[oslo_messaging_notifications] + +# +# From oslo.messaging +# + +# The Drivers(s) to handle sending 
notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +# Deprecated group/name - [DEFAULT]/notification_driver +#driver = + +# A URL representing the messaging driver to use for notifications. If +# not set, we fall back to the same configuration used for RPC. +# (string value) +# Deprecated group/name - [DEFAULT]/notification_transport_url +#transport_url = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +# Deprecated group/name - [DEFAULT]/notification_topics +#topics = notifications + + + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# SSL version to use (valid only if SSL enabled). Valid values are +# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be +# available on some distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +#kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +#kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). +# (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +#kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer +# cancel notification. (floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +#kombu_reconnect_delay = 1.0 + +# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression +# will not be used. This option may notbe available in future +# versions. (string value) +#kombu_compression = + +# How long to wait a missing client beforce abandoning to send it its +# replies. This value should not be longer than rpc_response_timeout. +# (integer value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_timeout +#kombu_missing_consumer_retry_timeout = 60 + +# Determines how the next RabbitMQ node is chosen in case the one we +# are currently connected to becomes unavailable. Takes effect only if +# more than one RabbitMQ node is provided in config. (string value) +# Allowed values: round-robin, shuffle +#kombu_failover_strategy = round-robin + +# The RabbitMQ broker address where a single node is used. (string +# value) +# Deprecated group/name - [DEFAULT]/rabbit_host +#rabbit_host = localhost + +# The RabbitMQ broker port where a single node is used. (port value) +# Minimum value: 0 +# Maximum value: 65535 +# Deprecated group/name - [DEFAULT]/rabbit_port +#rabbit_port = 5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +#rabbit_hosts = $rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +#rabbit_use_ssl = false + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +#rabbit_userid = guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +#rabbit_password = guest + +# The RabbitMQ login method. 
(string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +#rabbit_login_method = AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +#rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. +# (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 +# seconds. (integer value) +#rabbit_interval_max = 30 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_max_retries +#rabbit_max_retries = 0 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, +# queue mirroring is no longer controlled by the x-ha-policy argument +# when declaring a queue. If you just want to make sure that all +# queues (except those with auto-generated names) are mirrored across +# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha- +# mode": "all"}' " (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_ha_queues +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL +# (x-expires). Queues which are unused for the duration of the TTL are +# automatically deleted. The parameter affects only reply and fanout +# queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down +# if heartbeat's keep-alive fails (0 disable the heartbeat). +# EXPERIMENTAL (integer value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake +# (boolean value) +# Deprecated group/name - [DEFAULT]/fake_rabbit +#fake_rabbit = false + +# Maximum number of channels to allow (integer value) +#channel_max = + +# The maximum byte size for an AMQP frame (integer value) +#frame_max = + +# How often to send heartbeats for consumer's connections (integer +# value) +#heartbeat_interval = 1 + +# Enable SSL (boolean value) +#ssl = + +# Arguments passed to ssl.wrap_socket (dict value) +#ssl_options = + +# Set socket timeout in seconds for connection's socket (floating +# point value) +#socket_timeout = 0.25 + +# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating +# point value) +#tcp_user_timeout = 0.25 + +# Set delay for reconnection to some host which has connection error +# (floating point value) +#host_connection_reconnect_delay = 0.25 + +# Maximum number of connections to keep queued. (integer value) +#pool_max_size = 10 + +# Maximum number of connections to create above `pool_max_size`. +# (integer value) +#pool_max_overflow = 0 + +# Default number of seconds to wait for a connections to available +# (integer value) +#pool_timeout = 30 + +# Lifetime of a connection (since creation) in seconds or None for no +# recycling. Expired connections are closed on acquire. 
(integer +# value) +#pool_recycle = 600 + +# Threshold at which inactive (since release) connections are +# considered stale in seconds or None for no staleness. Stale +# connections are closed on acquire. (integer value) +#pool_stale = 60 + +# Persist notification messages. (boolean value) +#notification_persistence = false + +# Exchange name for for sending notifications (string value) +#default_notification_exchange = ${control_exchange}_notification + +# Max number of not acknowledged message which RabbitMQ can send to +# notification listener. (integer value) +#notification_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during +# sending notification, -1 means infinite retry. (integer value) +#default_notification_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending notification message (floating point value) +#notification_retry_delay = 0.25 + +# Time to live for rpc queues without consumers in seconds. (integer +# value) +#rpc_queue_expiration = 60 + +# Exchange name for sending RPC messages (string value) +#default_rpc_exchange = ${control_exchange}_rpc + +# Exchange name for receiving RPC replies (string value) +#rpc_reply_exchange = ${control_exchange}_rpc_reply + +# Max number of not acknowledged message which RabbitMQ can send to +# rpc listener. (integer value) +#rpc_listener_prefetch_count = 100 + +# Max number of not acknowledged message which RabbitMQ can send to +# rpc reply listener. (integer value) +#rpc_reply_listener_prefetch_count = 100 + +# Reconnecting retry count in case of connectivity problem during +# sending reply. -1 means infinite retry during rpc_timeout (integer +# value) +#rpc_reply_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending reply. (floating point value) +#rpc_reply_retry_delay = 0.25 + +# Reconnecting retry count in case of connectivity problem during +# sending RPC message, -1 means infinite retry. If actual retry +# attempts in not 0 the rpc request could be processed more then one +# time (integer value) +#default_rpc_retry_attempts = -1 + +# Reconnecting retry delay in case of connectivity problem during +# sending RPC message (floating point value) +#rpc_retry_delay = 0.25 + + +[pecan] + +# +# From mistral.config +# + +# Pecan root controller (string value) +#root = mistral.api.controllers.root.RootController + +# A list of modules where pecan will search for applications. (list +# value) +#modules = mistral.api + +# Enables the ability to display tracebacks in the browser and +# interactively debug during development. (boolean value) +#debug = false + +# Enables user authentication in pecan. 
(boolean value) +#auth_enable = true diff --git a/src/templates/mitaka/policy.json b/src/templates/mitaka/policy.json new file mode 100644 index 0000000..3278023 --- /dev/null +++ b/src/templates/mitaka/policy.json @@ -0,0 +1,64 @@ +{ + "admin_only": "is_admin:True", + "admin_or_owner": "is_admin:True or project_id:%(project_id)s", + "default": "rule:admin_or_owner", + + "action_executions:delete": "rule:admin_or_owner", + "action_execution:create": "rule:admin_or_owner", + "action_executions:get": "rule:admin_or_owner", + "action_executions:list": "rule:admin_or_owner", + "action_executions:update": "rule:admin_or_owner", + + "actions:create": "rule:admin_or_owner", + "actions:delete": "rule:admin_or_owner", + "actions:get": "rule:admin_or_owner", + "actions:list": "rule:admin_or_owner", + "actions:update": "rule:admin_or_owner", + + "cron_triggers:create": "rule:admin_or_owner", + "cron_triggers:delete": "rule:admin_or_owner", + "cron_triggers:get": "rule:admin_or_owner", + "cron_triggers:list": "rule:admin_or_owner", + + "environments:create": "rule:admin_or_owner", + "environments:delete": "rule:admin_or_owner", + "environments:get": "rule:admin_or_owner", + "environments:list": "rule:admin_or_owner", + "environments:update": "rule:admin_or_owner", + + "executions:create": "rule:admin_or_owner", + "executions:delete": "rule:admin_or_owner", + "executions:get": "rule:admin_or_owner", + "executions:list": "rule:admin_or_owner", + "executions:update": "rule:admin_or_owner", + + "members:create": "rule:admin_or_owner", + "members:delete": "rule:admin_or_owner", + "members:get": "rule:admin_or_owner", + "members:list": "rule:admin_or_owner", + "members:update": "rule:admin_or_owner", + + "services:list": "rule:admin_or_owner", + + "tasks:get": "rule:admin_or_owner", + "tasks:list": "rule:admin_or_owner", + "tasks:update": "rule:admin_or_owner", + + "workbooks:create": "rule:admin_or_owner", + "workbooks:delete": "rule:admin_or_owner", + "workbooks:get": "rule:admin_or_owner", + "workbooks:list": "rule:admin_or_owner", + "workbooks:update": "rule:admin_or_owner", + + "workflows:create": "rule:admin_or_owner", + "workflows:delete": "rule:admin_or_owner", + "workflows:get": "rule:admin_or_owner", + "workflows:list": "rule:admin_or_owner", + "workflows:update": "rule:admin_or_owner", + + "event_triggers:create": "rule:admin_or_owner", + "event_triggers:delete": "rule:admin_or_owner", + "event_triggers:get": "rule:admin_or_owner", + "event_triggers:list": "rule:admin_or_owner", + "event_triggers:update": "rule:admin_or_owner" +} diff --git a/src/templates/mitaka/wf_trace_logging.conf b/src/templates/mitaka/wf_trace_logging.conf new file mode 100644 index 0000000..9b02596 --- /dev/null +++ b/src/templates/mitaka/wf_trace_logging.conf @@ -0,0 +1,47 @@ +[loggers] +keys=workflow_trace,root + +[handlers] +keys=consoleHandler, wfTraceFileHandler, fileHandler + +[formatters] +keys=wfFormatter, simpleFormatter, verboseFormatter + +[logger_workflow_trace] +level=INFO +handlers=consoleHandler, wfTraceFileHandler +qualname=workflow_trace + +[logger_root] +level=INFO +handlers=fileHandler + +[handler_fileHandler] +class=FileHandler +level=INFO +formatter=verboseFormatter +args=("/var/log/mistral.log",) + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[handler_wfTraceFileHandler] +class=FileHandler +level=INFO +formatter=wfFormatter +args=("/var/log/mistral_wf_trace.log",) + +[formatter_verboseFormatter] +format=%(asctime)s %(thread)s 
%(levelname)s %(module)s [-] %(message)s +datefmt= + +[formatter_simpleFormatter] +format=%(asctime)s %(levelname)s [-] %(message)s +datefmt= + +[formatter_wfFormatter] +format=%(asctime)s WF [-] %(message)s +datefmt= diff --git a/src/tests/README.md b/src/tests/README.md new file mode 100644 index 0000000..a8d9e14 --- /dev/null +++ b/src/tests/README.md @@ -0,0 +1,9 @@ +# Overview + +This directory provides Amulet tests to verify basic deployment functionality +from the perspective of this charm, its requirements and its features, as +exercised in a subset of the full OpenStack deployment test bundle topology. + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing) +section of the OpenStack Charm Guide. \ No newline at end of file diff --git a/src/tests/basic_deployment.py b/src/tests/basic_deployment.py new file mode 100644 index 0000000..4e3d967 --- /dev/null +++ b/src/tests/basic_deployment.py @@ -0,0 +1,181 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import amulet +import json +import subprocess +import time + + +import charmhelpers.contrib.openstack.amulet.deployment as amulet_deployment +import charmhelpers.contrib.openstack.amulet.utils as os_amulet_utils + +# Use DEBUG to turn on debug logging +u = os_amulet_utils.OpenStackAmuletUtils(os_amulet_utils.DEBUG) + + +class SDNCharmDeployment(amulet_deployment.OpenStackAmuletDeployment): + """Amulet tests on a basic sdn_charm deployment.""" + + def __init__(self, series, openstack=None, source=None, stable=False): + """Deploy the entire test environment.""" + super(SDNCharmDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + + u.log.info('Waiting on extended status checks...') + exclude_services = ['mysql', 'mongodb'] + self._auto_wait_for_status(exclude_services=exclude_services) + + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where sdn_charm is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). 
+ """ + this_service = {'name': 'sdn_charm'} + other_services = [ + { + 'name': 'nova-compute', + 'constraints': {'mem': '4G'}, + }, + { + 'name': 'neutron-api', + }, + { + 'name': 'neutron-gateway', + }, + {'name': 'mysql'}, + {'name': 'rabbitmq-server'}, + {'name': 'keystone'}, + {'name': 'nova-cloud-controller'}, + {'name': 'glance'}, + ] + super(SDNCharmDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'nova-compute:neutron-plugin': 'sdn_charm:neutron-plugin', + 'keystone:shared-db': 'mysql:shared-db', + 'nova-cloud-controller:shared-db': 'mysql:shared-db', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:image-service': 'glance:image-service', + 'nova-cloud-controller:identity-service': + 'keystone:identity-service', + 'nova-compute:cloud-compute': + 'nova-cloud-controller:cloud-compute', + 'nova-compute:amqp': 'rabbitmq-server:amqp', + 'nova-compute:image-service': 'glance:image-service', + 'glance:shared-db': 'mysql:shared-db', + 'glance:identity-service': 'keystone:identity-service', + 'glance:amqp': 'rabbitmq-server:amqp', + 'neutron-api:shared-db': 'mysql:shared-db', + 'neutron-api:amqp': 'rabbitmq-server:amqp', + 'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api', + 'neutron-api:identity-service': 'keystone:identity-service', + 'neutron-gateway:amqp': 'rabbitmq-server:amqp', + 'neutron-gateway:neutron-plugin-api': + 'neutron-api:neutron-plugin-api', + 'neutron-gateway:quantum-network-service': + 'nova-cloud-controller:quantum-network-service', + 'neutron-gateway:juju-info': 'sdn_charm:container', + } + super(SDNCharmDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + configs = {'keystone': keystone_config} + super(SDNCharmDeployment, self)._configure_services(configs) + + def _get_token(self): + return self.keystone.service_catalog.catalog['token']['id'] + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.sdn_charm_sentry = self.d.sentry['sdn_charm'][0] + self.mysql_sentry = self.d.sentry['mysql'][0] + self.keystone_sentry = self.d.sentry['keystone'][0] + self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0] + self.sdn_charm_svcs = [ + 'sdn_charm-agent', 'sdn_charm-api'] + + # Authenticate admin with keystone endpoint + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + def check_and_wait(self, check_command, interval=2, max_wait=200, + desc=None): + waited = 0 + while not check_command() or waited > max_wait: + if desc: + u.log.debug(desc) + time.sleep(interval) + waited = waited + interval + if waited > max_wait: + raise Exception('cmd failed {}'.format(check_command)) + + def _run_action(self, unit_id, action, *args): + command = ["juju", "action", "do", "--format=json", unit_id, action] + command.extend(args) + print("Running command: %s\n" % " ".join(command)) + output = subprocess.check_output(command) + output_json = output.decode(encoding="UTF-8") + data = json.loads(output_json) + action_id = data[u'Action queued with id'] + return action_id + + def _wait_on_action(self, action_id): + command = ["juju", "action", "fetch", "--format=json", action_id] + while True: + try: + output = 
subprocess.check_output(command) + except Exception as e: + print(e) + return False + output_json = output.decode(encoding="UTF-8") + data = json.loads(output_json) + if data[u"status"] == "completed": + return True + elif data[u"status"] == "failed": + return False + time.sleep(2) + + def test_100_services(self): + """Verify the expected services are running on the corresponding + service units.""" + u.log.debug('Checking system services on units...') + + service_names = { + self.sdn_charm_sentry: self.sdn_charm_svcs, + } + + ret = u.validate_services_by_name(service_names) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + u.log.debug('OK') \ No newline at end of file diff --git a/src/tests/gate-basic-trusty-icehouse b/src/tests/gate-basic-trusty-icehouse new file mode 100755 index 0000000..bc30915 --- /dev/null +++ b/src/tests/gate-basic-trusty-icehouse @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic SDN Charm deployment on trusty-icehouse.""" + +from basic_deployment import SDNCharmDeployment + +if __name__ == '__main__': + deployment = SDNCharmDeployment(series='trusty') + deployment.run_tests() \ No newline at end of file diff --git a/src/tests/gate-basic-trusty-liberty b/src/tests/gate-basic-trusty-liberty new file mode 100755 index 0000000..f4fe1ff --- /dev/null +++ b/src/tests/gate-basic-trusty-liberty @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic SDN Charm deployment on trusty-liberty.""" + +from basic_deployment import SDNCharmDeployment + +if __name__ == '__main__': + deployment = SDNCharmDeployment(series='trusty', + openstack='cloud:trusty-liberty', + source='cloud:trusty-updates/liberty') + deployment.run_tests() \ No newline at end of file diff --git a/src/tests/gate-basic-trusty-mitaka b/src/tests/gate-basic-trusty-mitaka new file mode 100755 index 0000000..9142a10 --- /dev/null +++ b/src/tests/gate-basic-trusty-mitaka @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
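A minimal sketch of how the _run_action and _wait_on_action helpers in basic_deployment.py above could be exercised once the charm declares pause/resume actions; the action names are hypothetical and the sentry.info['unit_name'] lookup is assumed to be available on amulet sentries, neither is added by this commit:

    # Hypothetical follow-on test method for SDNCharmDeployment; the
    # 'pause'/'resume' actions are not defined by this commit.
    def test_910_pause_resume_actions(self):
        """Run the (hypothetical) pause and resume actions on the unit."""
        u.log.debug('Checking pause/resume actions...')
        unit_name = self.sdn_charm_sentry.info['unit_name']
        action_id = self._run_action(unit_name, 'pause')
        assert self._wait_on_action(action_id), 'pause action failed'
        action_id = self._run_action(unit_name, 'resume')
        assert self._wait_on_action(action_id), 'resume action failed'
        u.log.debug('OK')
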
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic SDN Charm deployment on trusty-mitaka.""" + +from basic_deployment import SDNCharmDeployment + +if __name__ == '__main__': + deployment = SDNCharmDeployment(series='trusty', + openstack='cloud:trusty-mitaka', + source='cloud:trusty-updates/mitaka') + deployment.run_tests() \ No newline at end of file diff --git a/src/tests/gate-basic-xenial-mitaka b/src/tests/gate-basic-xenial-mitaka new file mode 100755 index 0000000..37725c2 --- /dev/null +++ b/src/tests/gate-basic-xenial-mitaka @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Amulet tests on a basic SDN Charm deployment on xenial-mitaka.""" + +from basic_deployment import SDNCharmDeployment + +if __name__ == '__main__': + deployment = SDNCharmDeployment(series='xenial') + deployment.run_tests() \ No newline at end of file diff --git a/src/tests/tests.yaml b/src/tests/tests.yaml new file mode 100644 index 0000000..8ba143b --- /dev/null +++ b/src/tests/tests.yaml @@ -0,0 +1,17 @@ +# Bootstrap the model if necessary. +bootstrap: True +# Re-use bootstrap node instead of destroying/re-bootstrapping. +reset: True +# Use tox/requirements to drive the venv instead of bundletester's venv feature. +virtualenv: False +# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet. +makefile: [] +# Do not specify juju PPA sources. Juju is presumed to be pre-installed +# and configured in all test runner environments. +#sources: +# Do not specify or rely on system packages. +#packages: +# Do not specify python packages here. Use test-requirements.txt +# and tox instead. ie. The venv is constructed before bundletester +# is invoked. +#python-packages: \ No newline at end of file diff --git a/src/tox.ini b/src/tox.ini new file mode 100644 index 0000000..8b9f734 --- /dev/null +++ b/src/tox.ini @@ -0,0 +1,47 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. 
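The gate scripts above all follow the same pattern, differing only in the Ubuntu series and cloud archive pocket passed to SDNCharmDeployment. As a hedged illustration, a gate for a newer combination (hypothetical here, since this commit only targets icehouse through mitaka) would look like:

    #!/usr/bin/env python
    """Amulet tests on a basic SDN Charm deployment on xenial-newton."""
    # Hypothetical example following the pattern of the gate scripts above;
    # no xenial-newton gate is added by this commit.

    from basic_deployment import SDNCharmDeployment

    if __name__ == '__main__':
        deployment = SDNCharmDeployment(series='xenial',
                                        openstack='cloud:xenial-newton',
                                        source='cloud:xenial-updates/newton')
        deployment.run_tests()
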
+[tox] +skipsdist = True +envlist = pep8,py34,py35 +skip_missing_interpreters = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + TERM=linux + LAYER_PATH={toxinidir}/layers + INTERFACE_PATH={toxinidir}/interfaces + JUJU_REPOSITORY={toxinidir}/build +passenv = http_proxy https_proxy +install_command = + pip install {opts} {packages} +deps = + -r{toxinidir}/requirements.txt + +[testenv:build] +basepython = python2.7 +commands = + charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + +[testenv:py34] +basepython = python3.4 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:pep8] +basepython = python2.7 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:venv] +commands = {posargs} + +[flake8] +# E402 ignore necessary for path append before sys module import in actions +ignore = E402 \ No newline at end of file diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..4c9c8dd --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,7 @@ +# Lint and unit test requirements +flake8 +os-testr>=0.4.1 +charms.reactive +mock>=1.2 +coverage>=3.6 +git+https://github.com/openstack/charms.openstack.git#egg=charms-openstack \ No newline at end of file diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..8b9f734 --- /dev/null +++ b/tox.ini @@ -0,0 +1,47 @@ +# Source charm: ./tox.ini +# This file is managed centrally by release-tools and should not be modified +# within individual charm repos. +[tox] +skipsdist = True +envlist = pep8,py34,py35 +skip_missing_interpreters = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + TERM=linux + LAYER_PATH={toxinidir}/layers + INTERFACE_PATH={toxinidir}/interfaces + JUJU_REPOSITORY={toxinidir}/build +passenv = http_proxy https_proxy +install_command = + pip install {opts} {packages} +deps = + -r{toxinidir}/requirements.txt + +[testenv:build] +basepython = python2.7 +commands = + charm-build --log-level DEBUG -o {toxinidir}/build src {posargs} + +[testenv:py34] +basepython = python3.4 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:py35] +basepython = python3.5 +deps = -r{toxinidir}/test-requirements.txt +commands = ostestr {posargs} + +[testenv:pep8] +basepython = python2.7 +deps = -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests + +[testenv:venv] +commands = {posargs} + +[flake8] +# E402 ignore necessary for path append before sys module import in actions +ignore = E402 \ No newline at end of file diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py new file mode 100644 index 0000000..1ca5eb6 --- /dev/null +++ b/unit_tests/__init__.py @@ -0,0 +1,46 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
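The [flake8] section in tox.ini above ignores E402 because Juju action scripts have to extend sys.path before they can import charm or charmhelpers code. A minimal sketch of the shape of such a script (illustrative only; this commit adds no actions, and the paths and action payload are assumptions):

    #!/usr/bin/env python3
    # Illustrative action script: sys.path must be extended before the
    # imports below, which is exactly the pattern flake8's E402 would flag.
    import sys
    sys.path.append('lib')
    sys.path.append('reactive')

    import charmhelpers.core.hookenv as hookenv  # noqa: E402


    def main():
        # Report a simple result back to the operator via action-set.
        hookenv.action_set({'outcome': 'success'})


    if __name__ == '__main__':
        main()
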
+ +import sys +import mock + +sys.path.append('src') +sys.path.append('src/lib') + +# Mock out charmhelpers so that we can test without it. +# It also stops side effects from occurring. +charmhelpers = mock.MagicMock() +apt_pkg = mock.MagicMock() +sys.modules['apt_pkg'] = apt_pkg +sys.modules['charmhelpers'] = charmhelpers +sys.modules['charmhelpers.core'] = charmhelpers.core +sys.modules['charmhelpers.core.decorators'] = charmhelpers.core.decorators +sys.modules['charmhelpers.core.hookenv'] = charmhelpers.core.hookenv +sys.modules['charmhelpers.core.host'] = charmhelpers.core.host +sys.modules['charmhelpers.core.unitdata'] = charmhelpers.core.unitdata +sys.modules['charmhelpers.core.templating'] = charmhelpers.core.templating +sys.modules['charmhelpers.contrib'] = charmhelpers.contrib +sys.modules['charmhelpers.contrib.openstack'] = charmhelpers.contrib.openstack +sys.modules['charmhelpers.contrib.openstack.utils'] = ( + charmhelpers.contrib.openstack.utils) +sys.modules['charmhelpers.contrib.openstack.templating'] = ( + charmhelpers.contrib.openstack.templating) +sys.modules['charmhelpers.contrib.network'] = charmhelpers.contrib.network +sys.modules['charmhelpers.contrib.network.ip'] = ( + charmhelpers.contrib.network.ip) +sys.modules['charmhelpers.fetch'] = charmhelpers.fetch +sys.modules['charmhelpers.cli'] = charmhelpers.cli +sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers +sys.modules['charmhelpers.contrib.hahelpers.cluster'] = ( + charmhelpers.contrib.hahelpers.cluster) \ No newline at end of file diff --git a/unit_tests/test_lib_charm_openstack_openvswitch_odl.py b/unit_tests/test_lib_charm_openstack_openvswitch_odl.py new file mode 100644 index 0000000..7c4385b --- /dev/null +++ b/unit_tests/test_lib_charm_openstack_openvswitch_odl.py @@ -0,0 +1,47 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
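Because unit_tests/__init__.py above installs MagicMock stand-ins for charmhelpers in sys.modules before any test imports run, unit tests can import and stub charmhelpers without it being installed. A minimal sketch of a test relying on that mechanism (the config payload is illustrative):

    import unittest

    # charmhelpers has already been replaced with a MagicMock by
    # unit_tests/__init__.py when tests are collected from that package,
    # so this import succeeds without charmhelpers installed.
    import charmhelpers.core.hookenv as hookenv


    class TestCharmhelpersIsMocked(unittest.TestCase):

        def test_config_can_be_stubbed(self):
            # Stub hookenv.config() and check the stub is what callers see.
            hookenv.config.return_value = {'debug': True}  # illustrative key
            self.assertEqual(hookenv.config(), {'debug': True})
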
+ +from __future__ import absolute_import +from __future__ import print_function + +import unittest + +import mock + +import charm.openstack.sdn_charm as sdn_charm + + +class Helper(unittest.TestCase): + + def setUp(self): + self._patches = {} + self._patches_start = {} + + def tearDown(self): + for k, v in self._patches.items(): + v.stop() + setattr(self, k, None) + self._patches = None + self._patches_start = None + + def patch(self, obj, attr, return_value=None, **kwargs): + mocked = mock.patch.object(obj, attr, **kwargs) + self._patches[attr] = mocked + started = mocked.start() + started.return_value = return_value + self._patches_start[attr] = started + setattr(self, attr, started) + + +class TestSDNCharm(Helper): \ No newline at end of file diff --git a/unit_tests/test_openvswitch_odl_handlers.py b/unit_tests/test_openvswitch_odl_handlers.py new file mode 100644 index 0000000..f107ee6 --- /dev/null +++ b/unit_tests/test_openvswitch_odl_handlers.py @@ -0,0 +1,43 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import print_function + +import mock + +import reactive.sdn_charm_handlers as handlers + +import charms_openstack.test_utils as test_utils + + +class TestRegisteredHooks(test_utils.TestRegisteredHooks): + + def test_hooks(self): + defaults = [ + 'charm.installed', + 'config.changed', + 'update-status'] + hook_set = { + 'when': { + }, + 'when_not': { + } + } + # test that the hooks were registered via the + # reactive.sdn_charm_handlers module + self.registered_hooks_test_helper(handlers, hook_set, defaults) + + +class TestSDNCharmHandles(test_utils.PatchHelper): \ No newline at end of file
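Both TestSDNCharm and TestSDNCharmHandles are left as empty class statements in this first cut. A minimal sketch of how the Helper.patch fixture defined above is intended to be used; 'SDNCharm' is an assumed attribute of charm.openstack.sdn_charm and is not defined by this commit:

    # Hypothetical body for the empty TestSDNCharm class above.
    class TestSDNCharm(Helper):

        def test_patch_fixture(self):
            # Helper.patch() starts a mock.patch.object() and exposes the
            # started mock on the test case under the patched attribute name.
            self.patch(sdn_charm, 'SDNCharm', return_value='charm-instance')
            self.assertEqual(sdn_charm.SDNCharm(), 'charm-instance')
            self.SDNCharm.assert_called_once_with()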