From d7b26af44d2877676eaa66cf2be114ec9a52915f Mon Sep 17 00:00:00 2001
From: hernandanielg
Date: Fri, 30 Sep 2022 14:27:27 +0000
Subject: [PATCH] Cloudkitty charm

This charm enables the deployment of the CloudKitty service in an
OpenStack environment. CloudKitty provides Rating as a Service based on
metrics: it applies field mapping rules to calculate costs and
generates reports for the end user.

The cloudkitty charm can relate to mysql, rabbitmq-server, gnocchi and
keystone.

func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/929

Change-Id: I35353c24c779b226d5edd2641a3177258849122b
---
 .flake8                                       |   9 +
 .gitignore                                    |  11 +
 .jujuignore                                   |   5 +
 .stestr.conf                                  |   3 +
 .zuul.yaml                                    |   5 +
 LICENSE                                       | 202 ++++
 README.md                                     | 188 +++
 actions.yaml                                  |   5 +
 build-requirements.txt                        |   7 +
 charmcraft.yaml                               |  33 ++
 config.yaml                                   |   9 +
 .../v0/database_requires.py                   | 488 ++++++++++++++++++
 .../openstack_libs/v0/gnocchi_requires.py     | 196 +++++++
 .../openstack_libs/v0/keystone_requires.py    | 381 ++++++++++++++
 .../openstack_libs/v0/rabbitmq_requires.py    | 217 ++++++++
 lib/charms/operator_libs_linux/v1/systemd.py  | 219 ++++++++
 metadata.yaml                                 |  24 +
 osci.yaml                                     |   9 +
 rename.sh                                     |  14 +
 requirements.txt                              |   2 +
 run_tests                                     |  15 +
 src/charm.py                                  | 251 +++++++++
 templates/cloudkitty.conf                     |  78 +++
 test-requirements.txt                         |  10 +
 tests/README.md                               |  18 +
 tests/bundles/focal-yoga.yaml                 | 285 ++++++++++
 tests/bundles/jammy-yoga.yaml                 | 287 ++++++++++
 tests/tests.yaml                              |  28 +
 tox.ini                                       | 141 +++++
 unit_tests/__init__.py                        |   2 +
 unit_tests/test_charm.py                      | 130 +++++
 unit_tests/test_utils.py                      | 172 ++++++
 32 files changed, 3444 insertions(+)
 create mode 100644 .flake8
 create mode 100644 .gitignore
 create mode 100644 .jujuignore
 create mode 100644 .stestr.conf
 create mode 100644 .zuul.yaml
 create mode 100644 LICENSE
 create mode 100644 README.md
 create mode 100644 actions.yaml
 create mode 100644 build-requirements.txt
 create mode 100644 charmcraft.yaml
 create mode 100644 config.yaml
 create mode 100644 lib/charms/data_platform_libs/v0/database_requires.py
 create mode 100644 lib/charms/openstack_libs/v0/gnocchi_requires.py
 create mode 100644 lib/charms/openstack_libs/v0/keystone_requires.py
 create mode 100644 lib/charms/openstack_libs/v0/rabbitmq_requires.py
 create mode 100644 lib/charms/operator_libs_linux/v1/systemd.py
 create mode 100644 metadata.yaml
 create mode 100644 osci.yaml
 create mode 100755 rename.sh
 create mode 100644 requirements.txt
 create mode 100755 run_tests
 create mode 100755 src/charm.py
 create mode 100644 templates/cloudkitty.conf
 create mode 100644 test-requirements.txt
 create mode 100644 tests/README.md
 create mode 100644 tests/bundles/focal-yoga.yaml
 create mode 100644 tests/bundles/jammy-yoga.yaml
 create mode 100644 tests/tests.yaml
 create mode 100644 tox.ini
 create mode 100644 unit_tests/__init__.py
 create mode 100644 unit_tests/test_charm.py
 create mode 100644 unit_tests/test_utils.py

diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..8ef84fc
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+    venv
+    .git
+    build
+    dist
+    *.egg_info
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d3ae31b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+venv/
+build/
+bin/
+*.charm
+.stestr/
+.tox/
+.coverage
+__pycache__/
+*.py[cod]
+cover/
+.coverage*
\ No newline at end of file
diff --git a/.jujuignore b/.jujuignore
new file mode 100644
index 0000000..d72066a
--- /dev/null
+++ b/.jujuignore
@@ -0,0 +1,5 @@
+/venv
+*.py[cod]
+*.charm +.stestr/ +.tox/ diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000..5fcccac --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/.zuul.yaml b/.zuul.yaml new file mode 100644 index 0000000..ed74761 --- /dev/null +++ b/.zuul.yaml @@ -0,0 +1,5 @@ +- project: + templates: + - openstack-python3-charm-yoga-jobs + - openstack-cover-jobs + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..39bee56
--- /dev/null
+++ b/README.md
@@ -0,0 +1,188 @@
+# Cloudkitty
+
+Cloudkitty charm - OpenStack Rating as a Service
+
+## Overview
+
+This charm provides a way to deploy CloudKitty - the OpenStack Rating as a Service module - in an OpenStack cloud.
+
+**What is CloudKitty?**
+
+CloudKitty is a generic solution for the chargeback and rating of a cloud. It provides metric-based rating for cloud administrators, allowing them to create rating rules for the collected data.
+
+
+**CloudKitty usage**
+
+With CloudKitty, it is possible to:
+
+* Collect metrics from OpenStack (through Gnocchi).
+* Apply rating rules to those metrics.
+* Retrieve the rated information, grouped by scope and/or by metric type.
+
+However, it is not possible to:
+
+* Limit resources in other OpenStack services.
+* Add taxes, convert between currencies, etc.
+
+CloudKitty associates a price with a metric for a given period; the price is mapped according to end-user needs.
+
+
+## Configuration
+
+Cloudkitty charm configuration options:
+
+* `debug`\
+To run the service in debug mode, change the `debug` config value:
+  ```
+  juju config cloudkitty debug=true
+  ```
+* `region`\
+Sets the OpenStack cloud region. If the value needs to be changed, it is preferable to specify it in a bundle:
+  ```
+  cloudkitty:
+    charm: ch:cloudkitty
+    options:
+      region: MyRegion
+  ```
+
+To display all configuration option information run `juju config
+cloudkitty`. If the application is not deployed then see the charm's
+[configuration file](config.yaml).
+
+## Deployment
+
+Deploy the cloudkitty charm:
+
+```
+juju deploy cloudkitty --channel edge
+```
+
+Or in a bundle:
+```
+applications:
+  cloudkitty:
+    charm: ch:cloudkitty
+    channel: edge
+    num_units: 1
+    series: jammy
+```
+
+## Relations
+
+The cloudkitty charm supports the following relations.
+
+MySQL relation - relation to the [mysql-operator](https://github.com/canonical/mysql-operator) charm - provides database storage for the cloudkitty service.
+
+**NOTE:** This charm is not backward compatible with the legacy `mysql-innodb-cluster` charm.
+
+```
+juju deploy mysql --channel edge
+juju relate cloudkitty mysql
+```
+
+Keystone relation - provides identity management.
+
+```
+juju deploy keystone
+juju relate cloudkitty keystone
+```
+
+Gnocchi relation - provides the metrics collector service.
+
+```
+juju deploy gnocchi
+juju relate cloudkitty gnocchi
+```
+
+RabbitMQ relation - provides the message queue service.
+
+```
+juju deploy rabbitmq-server
+juju relate cloudkitty rabbitmq-server
+```
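+
+For reference, the fragment below sketches how the above relations could be
+expressed in a single bundle. It is only an illustrative sketch: channels and
+unit counts are assumptions, and the additional relations that keystone,
+gnocchi, mysql and rabbitmq-server themselves require are omitted. See the
+bundles under `tests/bundles/` for complete test deployments.
+
+```
+applications:
+  cloudkitty:
+    charm: ch:cloudkitty
+    channel: edge
+    num_units: 1
+  mysql:
+    charm: ch:mysql
+    channel: edge
+    num_units: 1
+  keystone:
+    charm: ch:keystone
+    num_units: 1
+  gnocchi:
+    charm: ch:gnocchi
+    num_units: 1
+  rabbitmq-server:
+    charm: ch:rabbitmq-server
+    num_units: 1
+relations:
+  - ["cloudkitty", "mysql"]
+  - ["cloudkitty", "keystone"]
+  - ["cloudkitty", "gnocchi"]
+  - ["cloudkitty", "rabbitmq-server"]
+```
+
+Relations are given by application name here; if Juju reports an ambiguous
+relation, name the endpoints explicitly (the endpoint names are declared in
+`metadata.yaml`).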
+
+## Actions
+
+This section lists Juju [actions](https://jaas.ai/docs/actions) supported by the charm. Actions allow specific operations to be performed on a per-unit basis.
+
+* `restart-services`\
+Restarts the `cloudkitty-api` and `cloudkitty-processor` services on the unit.
+
+  ```
+  juju run-action --wait cloudkitty/leader restart-services
+  ```
+
+## Usage
+
+To interact with the service, use the cloudkitty client included in the [openstackclients package](https://snapcraft.io/openstackclients).
+
+Check the client usage with:
+
+```
+openstack rating --help
+```
+
+First, enable the `hashmap` rating module:
+```
+$ openstack rating module enable hashmap
+```
+
+Then create a hashmap service, for example one named `image`:
+```
+$ openstack rating hashmap service create image
+```
+
+Next, create a field - `flavor_id` in this example - and associate it with the service by passing the service ID returned by the previous command:
+```
+$ openstack rating hashmap field create <service-id> flavor_id
+```
+
+Map the field to a specific value - a flavor ID in this case. List the flavors to find the ID:
+```
+$ openstack flavor list
++---------+-----------+-------+------+-----------+-------+-----------+
+| ID      | Name      |   RAM | Disk | Ephemeral | VCPUs | Is Public |
++---------+-----------+-------+------+-----------+-------+-----------+
+| 123abc  | m1.tiny   |   512 |    8 |        40 |     1 | True      |
++---------+-----------+-------+------+-----------+-------+-----------+
+```
+
+Create a mapping of type `flat` for that field value and assign it a cost of `1.2`:
+```
+$ openstack rating hashmap mapping create --type flat --field-id <field-id> --value 123abc 1.2
+```
+
+Finally, check the summary report:
+```
+$ openstack rating summary get
+```
+
+## TO-DO
+
+This charm is under development and not yet stable; the following list describes pending features:
+
+* Enable TLS support using the [TLS interface](https://opendev.org/openstack/charm-ops-interface-tls-certificates/src/branch/master/interface_tls_certificates/ca_client.py)
+
+* InfluxDB relation, required for [storage v2](https://docs.openstack.org/cloudkitty/latest/admin/configuration/storage.html#influxdb-v2)
+
+* Cloudkitty dashboard charm relation
+
+* High availability
+
+
+## Contributing
+
+Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
+on enhancements to this charm following best practice guidelines, and
+`CONTRIBUTING.md` for developer guidance.
+
+Follow OpenStack best practices for [software contributions](https://docs.openstack.org/charm-guide/latest/community/software-contrib/index.html) in charm development.
+
+
+# Bugs
+
+Please report bugs on [Launchpad][lp-bugs-charm-cloudkitty].
+
+For general charm questions refer to the [OpenStack Charm Guide][cg].
+
+
+[cg]: https://docs.openstack.org/charm-guide
+[lp-bugs-charm-cloudkitty]: https://bugs.launchpad.net/charm-cloudkitty/+filebug
+
diff --git a/actions.yaml b/actions.yaml
new file mode 100644
index 0000000..e4b39e2
--- /dev/null
+++ b/actions.yaml
@@ -0,0 +1,5 @@
+# Copyright 2021 Canonical Ltd.
+# See LICENSE file for licensing details.
+restart-services:
+  description: >
+    Restart cloudkitty-api and cloudkitty-processor services via systemd
diff --git a/build-requirements.txt b/build-requirements.txt
new file mode 100644
index 0000000..b6d2452
--- /dev/null
+++ b/build-requirements.txt
@@ -0,0 +1,7 @@
+# NOTES(lourot):
+# * We don't install charmcraft via pip anymore because it spins up a
+#   container anyway and copies the system's charmcraft snap into it, so the
+#   charmcraft snap is necessary on the system regardless.
+# * `tox -e build` successfully validated with charmcraft 1.2.1
+
+cffi==1.14.6; python_version < '3.6'  # cffi 1.15.0 drops support for py35.
diff --git a/charmcraft.yaml b/charmcraft.yaml
new file mode 100644
index 0000000..07d4244
--- /dev/null
+++ b/charmcraft.yaml
@@ -0,0 +1,33 @@
+type: charm
+
+parts:
+  charm:
+    after: [update-certificates]
+    charm-python-packages:
+      # See https://github.com/canonical/charmcraft/issues/551
+      - setuptools
+    build-packages:
+      - git
+
+  update-certificates:
+    plugin: nil
+    # See https://github.com/canonical/charmcraft/issues/658
+    override-build: |
+      apt update -qqq
+      apt install -qy ca-certificates
+      update-ca-certificates
+
+bases:
+  - build-on:
+      - name: ubuntu
+        channel: "20.04"
+        architectures: [amd64]
+    run-on:
+      - name: ubuntu
+        channel: "20.04"
+        architectures: [amd64, s390x, ppc64el, arm64]
+      - name: ubuntu
+        channel: "22.04"
+        architectures: [amd64, s390x, ppc64el, arm64]
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..6285111
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,9 @@
+options:
+  debug:
+    default: False
+    description: Enable debugging.
+    type: boolean
+  region:
+    type: string
+    default: RegionOne
+    description: OpenStack Region
diff --git a/lib/charms/data_platform_libs/v0/database_requires.py b/lib/charms/data_platform_libs/v0/database_requires.py
new file mode 100644
index 0000000..098be83
--- /dev/null
+++ b/lib/charms/data_platform_libs/v0/database_requires.py
@@ -0,0 +1,488 @@
+# Copyright 2022 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Relation 'requires' side abstraction for database relation.
+
+This library is a uniform interface to a selection of common database
+metadata, with added custom events that add convenience to database management,
+and methods to consume the application related data.
+
+The following is an example of using the DatabaseCreatedEvent, in the context
+of the application charm code:
+
+```python
+
+from charms.data_platform_libs.v0.database_requires import DatabaseRequires
+
+class ApplicationCharm(CharmBase):
+    # Application charm that connects to database charms.
+
+    def __init__(self, *args):
+        super().__init__(*args)
+
+        # Charm events defined in the database requires charm library.
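+        # Note: relation_name must match a `requires` endpoint declared in
+        # metadata.yaml, while database_name is the name of the database
+        # requested from the provider charm; the two values are independent
+        # even though both happen to be "database" in this example.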
+ self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +— database_created: event emitted when the requested database is created. +— endpoints_changed: event emitted when the read/write endpoints of the database have changed. +— read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.database_requires import DatabaseRequires + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. + self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... 
+ +``` +""" + +import json +import logging +from collections import namedtuple +from datetime import datetime +from typing import List, Optional + +from ops.charm import ( + CharmEvents, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "0241e088ffa9440fb4e3126349b2fb62" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version. +LIBPATCH = 3 + +logger = logging.getLogger(__name__) + + +class DatabaseEvent(RelationEvent): + """Base class for database events.""" + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints.""" + return self.relation.data[self.relation.app].get("endpoints") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + return self.relation.data[self.relation.app].get("password") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints.""" + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + return self.relation.data[self.relation.app].get("replset") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + return self.relation.data[self.relation.app].get("tls-ca") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch and Kafka only. + """ + return self.relation.data[self.relation.app].get("uris") + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + return self.relation.data[self.relation.app].get("username") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(DatabaseEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(DatabaseEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +— added — keys that were added. +— changed — keys that still exist but have new values. +— deleted — keys that were deleted. 
+""" + + +class DatabaseRequires(Object): + """Requires-side of the database relation.""" + + on = DatabaseEvents() + + def __init__( + self, + charm, + relation_name: str, + database_name: str, + extra_user_roles: str = None, + relations_aliases: List[str] = None, + ): + """Manager of database client relations.""" + super().__init__(charm, relation_name) + self.charm = charm + self.database = database_name + self.extra_user_roles = extra_user_roles + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.relation_name = relation_name + self.relations_aliases = relations_aliases + self.framework.observe( + self.charm.on[relation_name].relation_joined, self._on_relation_joined_event + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, self._on_relation_changed_event + ) + + # Define custom event names for each alias. + if relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[relation_name].limit + if len(relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. " + f"Expected {relation_connection_limit}, got {len(relations_aliases)}" + ) + + for relation_alias in relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + if ( + self.charm.model.get_relation(self.relation_name, relation_id) + .data[self.local_unit] + .get("alias") + ): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). + available_aliases = self.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_name]: + alias = relation.data[self.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_unit].update({"alias": available_aliases[0]}) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the local unit relation databag. + old_data = json.loads(event.relation.data[self.local_unit].get("data", "{}")) + # Retrieve the new data from the event relation databag. 
+ new_data = { + key: value for key, value in event.relation.data[event.app].items() if key != "data" + } + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + } + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[self.local_unit].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_name]: + if relation.id == relation_id: + return relation.data[self.local_unit].get("alias") + return None + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). + """ + data = {} + for relation in self.relations: + data[relation.id] = { + key: value for key, value in relation.data[relation.app].items() if key != "data" + } + return data + + def _update_relation_data(self, relation_id: int, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + data: dict containing the key-value pairs + that should be updated in the relation. + """ + if self.local_unit.is_leader(): + relation = self.charm.model.get_relation(self.relation_name, relation_id) + relation.data[self.local_app].update(data) + + def _on_relation_joined_event(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the database relation.""" + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. 
+ if self.extra_user_roles: + self._update_relation_data( + event.relation.id, + { + "database": self.database, + "extra-user-roles": self.extra_user_roles, + }, + ) + else: + self._update_relation_data(event.relation.id, {"database": self.database}) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the database is created + # (the database charm shared the credentials). + if "username" in diff.added and "password" in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + self.on.database_created.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "database_created") + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + self.on.endpoints_changed.emit(event.relation, app=event.app, unit=event.unit) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + self.on.read_only_endpoints_changed.emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "read_only_endpoints_changed") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/lib/charms/openstack_libs/v0/gnocchi_requires.py b/lib/charms/openstack_libs/v0/gnocchi_requires.py new file mode 100644 index 0000000..b800b92 --- /dev/null +++ b/lib/charms/openstack_libs/v0/gnocchi_requires.py @@ -0,0 +1,196 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GnocchiRequires module. + +This library contains the Requires for handling the gnocchi interface. 
+ +In order to use `GnocchiRequires` in your charm, +add the relation interface in the `metadata.yaml` file: +``` +requires: + metric-service: + interface: gnocchi +``` + +Also provide additional parameters to the charm object: + - region + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.openstack_libs.v0.gnocchi_requires import GnocchiRequires + +class GnochiClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + + # Gnochi Requires + self.metric_service = GnocchiRequires( + self, "metric-service" + region = "region" + ) + + # Event handlers + self.framework.observe( + self.metric_service.on.connected, + self._on_metric_service_connected) + self.framework.observe( + self.metric_service.on.ready, + self._on_metric_service_ready) + self.framework.observe( + self.metric_service.on.goneaway, + self._on_metric_service_goneaway) + + def _on_metric_service_connected(self, event): + '''React to the GnocchiRequires connected event. + + This event happens when a GnocchiRequires relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_metric_service_ready(self, event): + '''React to the Gnochi ready event. + + The GnocchiRequires interface will use the provided config for the + request to the metric server. + ''' + # GnocchiRequires Relation is ready. Do something with the + # completed relation. + pass + + def _on_metric_service_goneaway(self, event): + '''React to the Gnochi goneaway event. + + This event happens when an Gnochi relation is removed. + ''' + # GnocchiRequires Relation has goneaway. shutdown services or suchlike + pass +``` +""" + +import logging + +from ops.framework import EventBase, EventSource, Object, ObjectEvents +from ops.model import Relation + +# The unique Charmhub library identifier, never change it +LIBID = "bdc4aef454524b6eaa90501b3c9d500c" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + + +class GnocchiConnectedEvent(EventBase): + """Gnocchi connected Event.""" + + pass + + +class GnocchiReadyEvent(EventBase): + """Gnocchi ready for use Event.""" + + pass + + +class GnocchiGoneAwayEvent(EventBase): + """Gnocchi relation has gone-away Event.""" + + pass + + +class GnocchiServerEvents(ObjectEvents): + """Events class for `on`.""" + + connected = EventSource(GnocchiConnectedEvent) + ready = EventSource(GnocchiReadyEvent) + goneaway = EventSource(GnocchiGoneAwayEvent) + + +class GnocchiRequires(Object): + """Requires side interface for gnocchi interface type.""" + + on = GnocchiServerEvents() + + def __init__(self, charm, relation_name: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_gnocchi_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_gnocchi_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_gnocchi_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_gnocchi_relation_broken, + ) + + def _on_gnocchi_relation_joined(self, event): + """Gnocchi relation 
joined.""" + logging.debug("Gnocchi on_joined") + self.on.connected.emit() + + def _on_gnocchi_relation_changed(self, event): + """Gnocchi relation changed.""" + logging.debug("Gnocchi on_changed") + try: + self.gnocchi_url + self.on.ready.emit() + except AttributeError: + pass + + def _on_gnocchi_relation_broken(self, event): + """Gnocchi relation broken.""" + logging.debug("Gnocchi on_broken") + self.on.goneaway.emit() + + @property + def _gnocchi_rel(self) -> Relation: + """The Gnocchi relation.""" + return self.framework.model.get_relation(self.relation_name) + + def _get_remote_unit_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + for unit in self._gnocchi_rel.units: + data = self._gnocchi_rel.data[unit] + return data.get(key) + + def get_data(self, key: str) -> str: + """Return the value for the given key.""" + return self._get_remote_unit_data(key) + + @property + def gnocchi_url(self) -> str: + """Return the gnocchi_url.""" + return self.get_data("gnocchi_url") diff --git a/lib/charms/openstack_libs/v0/keystone_requires.py b/lib/charms/openstack_libs/v0/keystone_requires.py new file mode 100644 index 0000000..9e816d0 --- /dev/null +++ b/lib/charms/openstack_libs/v0/keystone_requires.py @@ -0,0 +1,381 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""KeystoneRequires module. + +This library contains the Requires for handling the keystone interface. + +Import `KeystoneRequires` in your charm, with the charm object and the +relation name: + - self + - "identity-service" + +Also provide additional parameters to the charm object: + - service + - internal_url + - public_url + - admin_url + - region + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.openstack_libs.v0.keystone_requires import KeystoneRequires + +class IdentityServiceClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # IdentityService Requires + self.identity_service = KeystoneRequires( + self, "identity-service", + service = "my-service" + internal_url = "http://internal-url" + public_url = "http://public-url" + admin_url = "http://admin-url" + region = "region" + ) + self.framework.observe( + self.identity_service.on.connected, + self._on_identity_service_connected) + self.framework.observe( + self.identity_service.on.ready, + self._on_identity_service_ready) + self.framework.observe( + self.identity_service.on.goneaway, + self._on_identity_service_goneaway) + + def _on_identity_service_connected(self, event): + '''React to the KeystoneRequires connected event. + + This event happens when a KeystoneRequires relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_identity_service_ready(self, event): + '''React to the IdentityService ready event. 
+ + The KeystoneRequires interface will use the provided config for the + request to the identity server. + ''' + # KeystoneRequires Relation is ready. Do something with the + # completed relation. + pass + + def _on_identity_service_goneaway(self, event): + '''React to the IdentityService goneaway event. + + This event happens when an IdentityService relation is removed. + ''' + # KeystoneRequires Relation has goneaway. shutdown services or suchlike + pass +``` +""" + +import json +import logging + +from ops.framework import EventBase, EventSource, Object, ObjectEvents +from ops.model import Relation + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "dae9fea1f8894b6295f0161b7ef7b7dc" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + + +class KeystoneConnectedEvent(EventBase): + """Keystone connected Event.""" + + pass + + +class KeystoneReadyEvent(EventBase): + """Keystone ready for use Event.""" + + pass + + +class KeystoneGoneAwayEvent(EventBase): + """Keystone relation has gone-away Event.""" + + pass + + +class KeystoneServerEvents(ObjectEvents): + """Events class for `on`.""" + + connected = EventSource(KeystoneConnectedEvent) + ready = EventSource(KeystoneReadyEvent) + goneaway = EventSource(KeystoneGoneAwayEvent) + + +class KeystoneRequires(Object): + """Requires side interface for keystone interface type.""" + + on = KeystoneServerEvents() + + _backwards_compat_remaps = { + "admin-user-name": "admin_user", + "service-user-name": "service_username", + "service-project-name": "service_tenant", + "service-project-id": "service_tenant_id", + "service-domain-name": "service_domain", + } + + def __init__( + self, charm, relation_name: str, service_endpoints: list, region: str + ): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_keystone_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_keystone_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_keystone_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_keystone_relation_broken, + ) + + def _on_keystone_relation_joined(self, event): + """Keystone relation joined.""" + logging.debug("Keystone on_joined") + self.on.connected.emit() + self.register_services(self.service_endpoints, self.region) + + def _on_keystone_relation_changed(self, event): + """Keystone relation changed.""" + logging.debug("Keystone on_changed") + try: + self.service_password + self.on.ready.emit() + except AttributeError: + pass + + def _on_keystone_relation_broken(self, event): + """Keystone relation broken.""" + logging.debug("Keystone on_broken") + self.on.goneaway.emit() + + @property + def _keystone_rel(self) -> Relation: + """The Keystone relation.""" + return self.framework.model.get_relation(self.relation_name) + + def _get_remote_app_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + data = self._keystone_rel.data[self._keystone_rel.app] + return data.get(key) + + def _get_remote_unit_data(self, key: str) -> 
str: + """Return the value for the given key from remote unit data.""" + # NOTE: deal with remapping and transpose of + # "-" -> "_" for backwards compatibility + _legacy_key = self._backwards_compat_remaps.get( + key, key.replace("-", "_") + ) + for unit in self._keystone_rel.units: + data = self._keystone_rel.data[unit] + if _legacy_key in data: + return data[_legacy_key] + + def get_data(self, key: str) -> str: + """Return the value for the given key. + + This method will inspect the application data bag first + and then fallback to per-unit databags for backwards + compatibility. + """ + return self._get_remote_app_data(key) or self._get_remote_unit_data( + key + ) + + @property + def api_version(self) -> str: + """Return the api_version.""" + return self.get_data("api-version") + + @property + def auth_host(self) -> str: + """Return the auth_host.""" + return self.get_data("auth-host") + + @property + def auth_port(self) -> str: + """Return the auth_port.""" + return self.get_data("auth-port") + + @property + def auth_protocol(self) -> str: + """Return the auth_protocol.""" + return self.get_data("auth-protocol") + + @property + def internal_host(self) -> str: + """Return the internal_host.""" + return self.get_data("internal-host") + + @property + def internal_port(self) -> str: + """Return the internal_port.""" + return self.get_data("internal-port") + + @property + def internal_protocol(self) -> str: + """Return the internal_protocol.""" + return self.get_data("internal-protocol") + + @property + def admin_domain_name(self) -> str: + """Return the admin_domain_name.""" + return self.get_data("admin-domain-name") + + @property + def admin_domain_id(self) -> str: + """Return the admin_domain_id.""" + return self.get_data("admin-domain-id") + + @property + def admin_project_name(self) -> str: + """Return the admin_project_name.""" + return self.get_data("admin-project-name") + + @property + def admin_project_id(self) -> str: + """Return the admin_project_id.""" + return self.get_data("admin-project-id") + + @property + def admin_user_name(self) -> str: + """Return the admin_user_name.""" + return self.get_data("admin-user-name") + + @property + def admin_user_id(self) -> str: + """Return the admin_user_id.""" + return self.get_data("admin-user-id") + + @property + def service_domain_name(self) -> str: + """Return the service_domain_name.""" + return self.get_data("service-domain-name") + + @property + def service_domain_id(self) -> str: + """Return the service_domain_id.""" + return self.get_data("service-domain-id") + + @property + def service_host(self) -> str: + """Return the service_host.""" + return self.get_data("service-host") + + @property + def service_password(self) -> str: + """Return the service_password.""" + return self.get_data("service-password") + + @property + def service_port(self) -> str: + """Return the service_port.""" + return self.get_data("service-port") + + @property + def service_protocol(self) -> str: + """Return the service_protocol.""" + return self.get_data("service-protocol") + + @property + def service_project_name(self) -> str: + """Return the service_project_name.""" + return self.get_data("service-project-name") + + @property + def service_project_id(self) -> str: + """Return the service_project_id.""" + return self.get_data("service-project-id") + + @property + def service_user_name(self) -> str: + """Return the service_user_name.""" + return self.get_data("service-user-name") + + @property + def service_user_id(self) -> str: + """Return the 
service_user_id.""" + return self.get_data("service-user-id") + + @property + def internal_auth_url(self) -> str: + """Return the internal_auth_url.""" + return self.get_data("internal-auth-url") + + @property + def admin_auth_url(self) -> str: + """Return the admin_auth_url.""" + return self.get_data("admin-auth-url") + + @property + def public_auth_url(self) -> str: + """Return the public_auth_url.""" + return self.get_data("public-auth-url") + + def register_services(self, service_endpoints: list, region: str) -> None: + """Request access to the Keystone server.""" + # NOTE: + # backward compatibility with keystone machine charm + # only supports single endpoint type registration + relation_data = { + "service": service_endpoints[0]["service_name"], + "public_url": service_endpoints[0]["public_url"], + "internal_url": service_endpoints[0]["internal_url"], + "admin_url": service_endpoints[0]["admin_url"], + "region": region, + } + unit_data = self._keystone_rel.data[self.charm.unit] + unit_data.update(relation_data) + + # NOTE: + # Forward compatibility with keystone k8s operator + if self.model.unit.is_leader(): + logging.debug("Requesting service registration") + app_data = self._keystone_rel.data[self.charm.app] + app_data["service-endpoints"] = json.dumps( + service_endpoints, sort_keys=True + ) + app_data["region"] = region diff --git a/lib/charms/openstack_libs/v0/rabbitmq_requires.py b/lib/charms/openstack_libs/v0/rabbitmq_requires.py new file mode 100644 index 0000000..fe7ce70 --- /dev/null +++ b/lib/charms/openstack_libs/v0/rabbitmq_requires.py @@ -0,0 +1,217 @@ +# Copyright 2022 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RabbitMQRequires module. + +This library contains the Requires for handling the rabbitmq interface. + +Import `RabbitMQRequires` in your charm, with the charm object and the +relation name: + - self + - "amqp" + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.openstack_libs.v0.rabbitmq_requires import RabbitMQRequires + +class RabbitMQClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # RabbitMQ Requires + self.rabbitmq = RabbitMQRequires( + self, "amqp", + username = "my-user", + vhost = "my-vhost", + ) + self.framework.observe( + self.rabbitmq.on.connected, + self._on_amqp_connected) + self.framework.observe( + self.rabbitmq.on.ready, + self._on_amqp_ready) + self.framework.observe( + self.rabbitmq.on.goneaway, + self._on_amqp_goneaway) + + def _on_amqp_connected(self, event): + '''React to the RabbitMQRequires connected event. + + This event happens when a RabbitMQRequires relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_amqp_ready(self, event): + '''React to the RabbitMQ ready event. + + The RabbitMQRequires interface will use the provided config for the + request to the identity server. 
+ ''' + # RabbitMQRequires Relation is ready. Do something with the + # completed relation. + pass + + def _on_amqp_goneaway(self, event): + '''React to the RabbitMQ goneaway event. + + This event happens when an RabbitMQ relation is removed. + ''' + # RabbitMQRequires Relation has goneaway. shutdown services or suchlike + pass +``` +""" + +import logging + +from ops.framework import EventBase, EventSource, Object, ObjectEvents +from ops.model import Relation + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "dae9fea1f8894b6295f0161b7ef7b7dc" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + + +class RabbitMQConnectedEvent(EventBase): + """RabbitMQ connected Event.""" + + pass + + +class RabbitMQReadyEvent(EventBase): + """RabbitMQ ready for use Event.""" + + pass + + +class RabbitMQGoneAwayEvent(EventBase): + """RabbitMQ relation has gone-away Event.""" + + pass + + +class RabbitMQServerEvents(ObjectEvents): + """Events class for `on`.""" + + connected = EventSource(RabbitMQConnectedEvent) + ready = EventSource(RabbitMQReadyEvent) + goneaway = EventSource(RabbitMQGoneAwayEvent) + + +class RabbitMQRequires(Object): + """Requires side interface for rabbitmq interface type.""" + + on = RabbitMQServerEvents() + + def __init__(self, charm, relation_name: str, username: str, vhost: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self._username = username + self._vhost = vhost + + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_rabbitmq_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_rabbitmq_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_rabbitmq_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_rabbitmq_relation_broken, + ) + + def _on_rabbitmq_relation_joined(self, event): + """Rabbitmq relation joined.""" + logging.debug("RabbitMQ on_joined") + self.on.connected.emit() + self.register() + + def _on_rabbitmq_relation_changed(self, event): + """Rabbitmq relation changed.""" + logging.debug("RabbitMQ on_changed") + try: + self.password + self.on.ready.emit() + except AttributeError: + pass + + def _on_rabbitmq_relation_broken(self, event): + """Rabbitmq relation broken.""" + logging.debug("RabbitMQ on_broken") + self.on.goneaway.emit() + + @property + def _rabbitmq_rel(self) -> Relation: + """The RabbitMQ relation.""" + return self.framework.model.get_relation(self.relation_name) + + @property + def hostname(self) -> str: + """Return the hostname.""" + return self._get_data("hostname") + + @property + def password(self) -> str: + """Return the password.""" + return self._get_data("password") + + @property + def username(self) -> str: + """Return the username.""" + return self._username + + @property + def vhost(self) -> str: + """Return the vhost.""" + return self._vhost + + def _get_remote_unit_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + for unit in self._rabbitmq_rel.units: + data = self._rabbitmq_rel.data[unit] + return data.get(key) + + def _get_data(self, key: str) -> str: + """Return the value for the given key.""" + return 
self._get_remote_unit_data(key) + + def register(self) -> None: + """Request access to the RabbitMQ server.""" + relation_data = { + "username": self.username, + "vhost": self.vhost, + } + unit_data = self._rabbitmq_rel.data[self.charm.unit] + unit_data.update(relation_data) diff --git a/lib/charms/operator_libs_linux/v1/systemd.py b/lib/charms/operator_libs_linux/v1/systemd.py new file mode 100644 index 0000000..5be34c1 --- /dev/null +++ b/lib/charms/operator_libs_linux/v1/systemd.py @@ -0,0 +1,219 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Abstractions for stopping, starting and managing system services via systemd. + +This library assumes that your charm is running on a platform that uses systemd. E.g., +Centos 7 or later, Ubuntu Xenial (16.04) or later. + +For the most part, we transparently provide an interface to a commonly used selection of +systemd commands, with a few shortcuts baked in. For example, service_pause and +service_resume with run the mask/unmask and enable/disable invocations. + +Example usage: +```python +from charms.operator_libs_linux.v0.systemd import service_running, service_reload + +# Start a service +if not service_running("mysql"): + success = service_start("mysql") + +# Attempt to reload a service, restarting if necessary +success = service_reload("nginx", restart_on_failure=True) +``` + +""" + +import logging +import subprocess + +__all__ = [ # Don't export `_systemctl`. (It's not the intended way of using this lib.) + "service_pause", + "service_reload", + "service_restart", + "service_resume", + "service_running", + "service_start", + "service_stop", + "daemon_reload", +] + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 0 + + +class SystemdError(Exception): + pass + + +def _popen_kwargs(): + return dict( + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + bufsize=1, + universal_newlines=True, + encoding="utf-8", + ) + + +def _systemctl( + sub_cmd: str, service_name: str = None, now: bool = None, quiet: bool = None +) -> bool: + """Control a system service. + + Args: + sub_cmd: the systemctl subcommand to issue + service_name: the name of the service to perform the action on + now: passes the --now flag to the shell invocation. + quiet: passes the --quiet flag to the shell invocation. 
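+
+    For example (illustrative only), _systemctl("enable", "cloudkitty-api",
+    now=True) shells out to `systemctl enable cloudkitty-api --now` and
+    returns True when that command exits 0.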
+ """ + cmd = ["systemctl", sub_cmd] + + if service_name is not None: + cmd.append(service_name) + if now is not None: + cmd.append("--now") + if quiet is not None: + cmd.append("--quiet") + if sub_cmd != "is-active": + logger.debug("Attempting to {} '{}' with command {}.".format(cmd, service_name, cmd)) + else: + logger.debug("Checking if '{}' is active".format(service_name)) + + proc = subprocess.Popen(cmd, **_popen_kwargs()) + last_line = "" + for line in iter(proc.stdout.readline, ""): + last_line = line + logger.debug(line) + + proc.wait() + + if sub_cmd == "is-active": + # If we are just checking whether a service is running, return True/False, rather + # than raising an error. + if proc.returncode < 1: + return True + if proc.returncode == 3: # Code returned when service is not active. + return False + + if proc.returncode < 1: + return True + + raise SystemdError( + "Could not {}{}: systemd output: {}".format( + sub_cmd, " {}".format(service_name) if service_name else "", last_line + ) + ) + + +def service_running(service_name: str) -> bool: + """Determine whether a system service is running. + + Args: + service_name: the name of the service + """ + return _systemctl("is-active", service_name, quiet=True) + + +def service_start(service_name: str) -> bool: + """Start a system service. + + Args: + service_name: the name of the service to stop + """ + return _systemctl("start", service_name) + + +def service_stop(service_name: str) -> bool: + """Stop a system service. + + Args: + service_name: the name of the service to stop + """ + return _systemctl("stop", service_name) + + +def service_restart(service_name: str) -> bool: + """Restart a system service. + + Args: + service_name: the name of the service to restart + """ + return _systemctl("restart", service_name) + + +def service_reload(service_name: str, restart_on_failure: bool = False) -> bool: + """Reload a system service, optionally falling back to restart if reload fails. + + Args: + service_name: the name of the service to reload + restart_on_failure: boolean indicating whether to fallback to a restart if the + reload fails. + """ + try: + return _systemctl("reload", service_name) + except SystemdError: + if restart_on_failure: + return _systemctl("restart", service_name) + else: + raise + + +def service_pause(service_name: str) -> bool: + """Pause a system service. + + Stop it, and prevent it from starting again at boot. + + Args: + service_name: the name of the service to pause + """ + _systemctl("disable", service_name, now=True) + _systemctl("mask", service_name) + + if not service_running(service_name): + return True + + raise SystemdError("Attempted to pause '{}', but it is still running.".format(service_name)) + + +def service_resume(service_name: str) -> bool: + """Resume a system service. + + Re-enable starting again at boot. Start the service. 
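+
+    A typical (illustrative) pairing with service_pause:
+
+        service_pause("cloudkitty-processor")   # disable --now, then mask
+        service_resume("cloudkitty-processor")  # unmask, then enable --now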
+ + Args: + service_name: the name of the service to resume + """ + _systemctl("unmask", service_name) + _systemctl("enable", service_name, now=True) + + if service_running(service_name): + return True + + raise SystemdError("Attempted to resume '{}', but it is not running.".format(service_name)) + + +def daemon_reload() -> bool: + """Reload systemd manager configuration.""" + return _systemctl("daemon-reload") diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 0000000..6f90464 --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,24 @@ +name: cloudkitty +maintainer: OpenStack Charmers +display-name: Cloudkitty +summary: Rating as a Service add-on for Openstack +description: | + Rating as a Service add-on for Openstack. + +tags: + - openstack +series: + - jammy + - focal +extra-bindings: + public: + +requires: + identity-service: + interface: keystone + database: + interface: mysql_client + metric-service: + interface: gnocchi + amqp: + interface: rabbitmq diff --git a/osci.yaml b/osci.yaml new file mode 100644 index 0000000..15208fc --- /dev/null +++ b/osci.yaml @@ -0,0 +1,9 @@ +- project: + templates: + - charm-yoga-unit-jobs + - charm-yoga-functional-jobs + vars: + needs_charm_build: true + charm_build_name: cloudkitty + build_type: charmcraft + diff --git a/rename.sh b/rename.sh new file mode 100755 index 0000000..303a9ae --- /dev/null +++ b/rename.sh @@ -0,0 +1,14 @@ +#!/bin/bash +charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}') +echo "renaming ${charm}_*.charm to ${charm}.charm" +echo -n "pwd: " +pwd +ls -al +echo "Removing bad downloaded charm maybe?" +if [[ -e "${charm}.charm" ]]; +then + rm "${charm}.charm" +fi +echo "Renaming charm here." +mv ${charm}_*.charm ${charm}.charm + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8cce361 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +ops>=1.5.0 +git+https://opendev.org/openstack/charm-ops-openstack@master#egg=ops_openstack diff --git a/run_tests b/run_tests new file mode 100755 index 0000000..761fbc4 --- /dev/null +++ b/run_tests @@ -0,0 +1,15 @@ +#!/bin/sh -e + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --branch --source=src -m unittest -v "$@" +coverage report -m diff --git a/src/charm.py b/src/charm.py new file mode 100755 index 0000000..22f76b4 --- /dev/null +++ b/src/charm.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python3 + +"""Charm the service. 
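+
+CloudkittyCharm installs the cloudkitty-api and cloudkitty-processor
+packages and renders /etc/cloudkitty/cloudkitty.conf from the
+identity-service, database, metric-service and amqp relations.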
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import subprocess + +from pathlib import Path + +from ops_openstack.core import OSBaseCharm + +from ops.framework import StoredState +from ops.main import main + +from ops.model import ( + ActiveStatus +) + +from charmhelpers.core import templating +from charmhelpers.core.host import restart_on_change + +from charmhelpers.contrib.openstack import templating as os_templating + +from charms.openstack_libs.v0.keystone_requires import ( + KeystoneRequires +) + +from charms.openstack_libs.v0.gnocchi_requires import ( + GnocchiRequires +) + +from charms.openstack_libs.v0.rabbitmq_requires import ( + RabbitMQRequires +) + +from charms.data_platform_libs.v0.database_requires import ( + DatabaseRequires +) + +from charms.operator_libs_linux.v1.systemd import ( + service_restart +) + +logger = logging.getLogger(__name__) + + +class CloudkittyCharm(OSBaseCharm): + """Charm the service.""" + _stored = StoredState() + + PACKAGES = [ + 'cloudkitty-api', + 'cloudkitty-processor' + ] + + REQUIRED_RELATIONS = [ + 'database', + 'identity-service', + 'metric-service', + 'amqp' + ] + + CONFIG_FILE_OWNER = 'cloudkitty' + CONFIG_FILE_GROUP = 'cloudkitty' + CONFIG_DIR = Path('/etc/cloudkitty') + CONFIG_FILE = 'cloudkitty.conf' + CONFIG_PATH = CONFIG_DIR / CONFIG_FILE + + SERVICES = ['cloudkitty-api', 'cloudkitty-processor'] + RESTART_MAP = { + str(CONFIG_PATH): SERVICES + } + + release = 'yoga' + + def __init__(self, framework): + super().__init__(framework) + super().register_status_check(self.status_check) + + self._app_name = self.model.app.name + self._address = None + + self._stored.is_started = True + + self.identity_service = KeystoneRequires( + charm=self, + relation_name='identity-service', + service_endpoints=[{ + 'service_name': self._app_name, + 'internal_url': self.service_url('internal'), + 'public_url': self.service_url('public'), + 'admin_url': self.service_url('public') + }], + region=self.model.config['region'] + ) + + self.metric_service = GnocchiRequires( + charm=self, + relation_name='metric-service' + ) + + self.rabbitmq = RabbitMQRequires( + charm=self, + relation_name='amqp', + username=self._app_name, + vhost=self._app_name, + ) + + self.database = DatabaseRequires( + charm=self, + relation_name='database', + database_name=self._app_name + ) + + self.framework.observe(self.on.config_changed, + self._on_config_changed) + self.framework.observe(self.identity_service.on.ready, + self._on_identity_service_ready) + self.framework.observe(self.metric_service.on.ready, + self._on_metric_service_ready) + self.framework.observe(self.database.on.database_created, + self._on_database_created) + self.framework.observe(self.rabbitmq.on.ready, + self._on_amqp_ready) + self.framework.observe(self.on.restart_services_action, + self._on_restart_services_action) + + @property + def protocol(self): + return 'http' + + @property + def host(self) -> str: + if self._address is None: + binding = self.model.get_binding('public') + self._address = binding.network.bind_address + return str(self._address) + + @property + def port(self) -> int: + return 8889 + + def service_url(self, _) -> str: + return f'{self.protocol}://{self.host}:{self.port}' + + def status_check(self): + return ActiveStatus() + + @restart_on_change(RESTART_MAP) + def _render_config(self, _) -> str: + """Render configuration + + Render related services 
configuration into the cloudkitty configuration file.
+        """
+        _template_loader = os_templating.get_loader(
+            'templates/',
+            self.release
+        )
+
+        _context = {
+            'options': self.model.config,
+            'identity_service': self.identity_service,
+            'metric_service': self.metric_service,
+            'databases': self.database.fetch_relation_data(),
+            'rabbitmq': self.rabbitmq,
+        }
+
+        return templating.render(
+            source=self.CONFIG_FILE,
+            target=self.CONFIG_PATH,
+            context=_context,
+            template_loader=_template_loader,
+            owner=self.CONFIG_FILE_OWNER,
+            group=self.CONFIG_FILE_GROUP,
+            perms=0o640
+        )
+
+    def _bootstrap_db(self):
+        """Bootstrap the database.
+
+        Run the cloudkitty storage initialization followed by the dbsync
+        upgrade. If either command exits non-zero, subprocess.check_call
+        raises and the unit falls into an error state.
+
+        This method is only executed on the leader unit.
+        """
+        if not self.model.unit.is_leader():
+            logger.info('unit is not leader, skipping bootstrap db')
+            return
+
+        logger.info('starting cloudkitty db migration')
+
+        commands = [
+            ['cloudkitty-storage-init'],
+            ['cloudkitty-dbsync', 'upgrade']
+        ]
+
+        for cmd in commands:
+            logger.info(f"executing {cmd} command")
+            subprocess.check_call(cmd)
+
+    def _on_config_changed(self, event):
+        """Handle the config-changed event."""
+        self._render_config(event)
+        self.update_status()
+
+    def _on_identity_service_ready(self, event):
+        """Handle the identity-service relation ready event."""
+        self._render_config(event)
+        self.update_status()
+
+    def _on_metric_service_ready(self, event):
+        """Handle the metric-service relation ready event."""
+        self._render_config(event)
+        self.update_status()
+
+    def _on_database_created(self, event):
+        """Handle the database created event."""
+        self._render_config(event)
+        self._bootstrap_db()
+        self.update_status()
+
+    def _on_amqp_ready(self, event):
+        """Handle the RabbitMQ (amqp) relation ready event."""
+        self._render_config(event)
+        self.update_status()
+
+    def _on_restart_services_action(self, event):
+        """Restart the cloudkitty services.
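+
+        An illustrative CLI invocation (assuming the action is exposed as
+        `restart-services` in actions.yaml, which is not shown here):
+
+            juju run-action --wait cloudkitty/leader restart-services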
+        """
+        event.log(f"restarting services {', '.join(self.SERVICES)}")
+        for service in self.SERVICES:
+            if not service_restart(service):
+                event.fail(f"Failed to restart service: {service}")
+
+
+if __name__ == "__main__":
+    main(CloudkittyCharm)
diff --git a/templates/cloudkitty.conf b/templates/cloudkitty.conf
new file mode 100644
index 0000000..1447c61
--- /dev/null
+++ b/templates/cloudkitty.conf
@@ -0,0 +1,78 @@
+[DEFAULT]
+verbose = true
+debug = {{ options.debug }}
+log_dir = /var/log/cloudkitty
+auth_strategy = keystone
+{% if rabbitmq.password -%}
+transport_url = rabbit://{{ rabbitmq.username }}:{{ rabbitmq.password }}@{{ rabbitmq.hostname }}:5672/{{ rabbitmq.vhost }}
+
+[oslo_messaging_rabbit]
+
+[oslo_messaging_notifications]
+driver = messagingv2
+transport_url = rabbit://{{ rabbitmq.username }}:{{ rabbitmq.password }}@{{ rabbitmq.hostname }}:5672/{{ rabbitmq.vhost }}
+
+{% endif -%}
+
+{% if identity_service -%}
+[keystone_authtoken]
+auth_section = ks_auth
+
+[ks_auth]
+auth_type = v3password
+auth_protocol = {{ identity_service.auth_protocol }}
+auth_uri = {{ identity_service.auth_protocol }}://{{ identity_service.auth_host }}:{{ identity_service.service_port }}/v{{ identity_service.api_version }}
+auth_url = {{ identity_service.auth_protocol }}://{{ identity_service.auth_host }}:{{ identity_service.auth_port }}/v{{ identity_service.api_version }}
+project_domain_name = {{ identity_service.service_domain_name }}
+user_domain_name = {{ identity_service.service_domain_name }}
+identity_uri = {{ identity_service.auth_protocol }}://{{ identity_service.auth_host }}:{{ identity_service.service_port }}/v{{ identity_service.api_version }}
+project_name = {{ identity_service.service_project_name }}
+username = {{ identity_service.service_user_name }}
+password = {{ identity_service.service_password }}
+region_name = {{ options.region }}
+insecure = true
+
+{% endif -%}
+
+{% if databases -%}
+{% for _, database in databases.items() %}
+[database]
+connection = mysql+pymysql://{{ database.username }}:{{ database.password }}@{{ database.endpoints }}/cloudkitty
+
+[storage]
+version = 1
+backend = sqlalchemy
+
+{% endfor %}
+{% endif -%}
+
+{% if influxdb -%}
+[storage]
+version = 2
+backend = influxdb
+
+[storage_influxdb]
+username = {{ influxdb.user }}
+password = {{ influxdb.password }}
+database = {{ influxdb.db }}
+host = {{ influxdb.host }}
+
+{% endif -%}
+
+{% if metric_service.gnocchi_url -%}
+[fetcher]
+backend = gnocchi
+
+[fetcher_gnocchi]
+auth_section = ks_auth
+gnocchi_endpoint = {{ metric_service.gnocchi_url }}
+region_name = {{ options.region }}
+
+[collect]
+collector = gnocchi
+metrics_conf = /etc/cloudkitty/metrics.yml
+
+[collector_gnocchi]
+auth_section = ks_auth
+region_name = {{ options.region }}
+{% endif -%}
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..3c7fa14
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,10 @@
+coverage
+flake8
+stestr
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+requests-mock
+ripdb
+git+https://github.com/openstack/charm-ops-sunbeam.git
+mock
+git+https://opendev.org/openstack/charm-ops-openstack@master#egg=ops_openstack
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..19016c4
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,18 @@
+# Overview
+
+This directory provides Zaza test definitions and bundles to verify basic
+deployment functionality from the
perspective of this charm, its requirements +and its features, as exercised in a subset of the full OpenStack deployment +test bundle topology. + +Run the smoke tests with: + +```bash +cd ../ +tox -e build +tox -e func-smoke +``` + +For full details on functional testing of OpenStack charms please refer to +the [functional testing](https://docs.openstack.org/charm-guide/latest/reference/testing.html#functional-testing) +section of the OpenStack Charm Guide. \ No newline at end of file diff --git a/tests/bundles/focal-yoga.yaml b/tests/bundles/focal-yoga.yaml new file mode 100644 index 0000000..854d0ca --- /dev/null +++ b/tests/bundles/focal-yoga.yaml @@ -0,0 +1,285 @@ +variables: + openstack-origin: &openstack-origin distro + +series: focal + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + series: focal + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + +applications: + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + gnocchi-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + channel: latest/edge + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + + rabbitmq-server: + charm: ch:rabbitmq-server + channel: latest/edge + num_units: 1 + options: + source: *openstack-origin + to: + - '3' + + memcached: + charm: ch:memcached + channel: latest/edge + num_units: 1 + # Note that holding memcached at focal as it's not available at jammy yet. 
+ series: focal + to: + - '4' + + ceph-osd: + charm: ch:ceph-osd + channel: latest/edge + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + to: + - '5' + - '6' + - '7' + + ceph-mon: + charm: ch:ceph-mon + channel: latest/edge + num_units: 3 + options: + source: *openstack-origin + to: + - '8' + - '9' + - '10' + + keystone: + charm: ch:keystone + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + + glance: + charm: ch:glance + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + + nova-cloud-controller: + charm: ch:nova-cloud-controller + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '13' + + placement: + charm: ch:placement + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + + nova-compute: + charm: ch:nova-compute + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + + ceilometer: + charm: ch:ceilometer + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '16' + + gnocchi: + charm: ch:gnocchi + channel: latest/edge + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '17' + + ceilometer-agent: + charm: ch:ceilometer-agent + channel: latest/edge + + cloudkitty: + charm: ../../cloudkitty.charm + num_units: 1 + to: + - '18' + + mysql: + charm: ch:mysql + channel: latest/edge + num_units: 1 + to: + - '19' + +relations: + + - - 'cloudkitty' + - 'mysql' + + - - 'cloudkitty' + - 'keystone' + + - - 'cloudkitty' + - 'gnocchi' + + - - 'cloudkitty' + - 'rabbitmq-server' + + - - 'ceilometer:amqp' + - 'rabbitmq-server:amqp' + + - - 'ceilometer:identity-notifications' + - 'keystone:identity-notifications' + + - - 'ceilometer:ceilometer-service' + - 'ceilometer-agent:ceilometer-service' + + - - 'ceilometer:metric-service' + - 'gnocchi:metric-service' + + - - 'ceilometer:identity-credentials' + - 'keystone:identity-credentials' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceilometer-agent:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:nova-ceilometer' + - 'ceilometer-agent:nova-ceilometer' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement:identity-service' + - 'keystone:identity-service' + + - - 'placement:placement' + - 'nova-cloud-controller:placement' + + - - 'ceph-mon:osd' + - 
'ceph-osd:mon' + + - - 'gnocchi:shared-db' + - 'gnocchi-mysql-router:shared-db' + - - 'gnocchi-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'gnocchi:storage-ceph' + - 'ceph-mon:client' + + - - 'gnocchi:coordinator-memcached' + - 'memcached:cache' + + - - 'gnocchi:identity-service' + - 'keystone:identity-service' diff --git a/tests/bundles/jammy-yoga.yaml b/tests/bundles/jammy-yoga.yaml new file mode 100644 index 0000000..124fff8 --- /dev/null +++ b/tests/bundles/jammy-yoga.yaml @@ -0,0 +1,287 @@ +variables: + openstack-origin: &openstack-origin distro + +series: jammy + +comment: +- 'machines section to decide order of deployment. database sooner = faster' +machines: + '0': + constraints: mem=3072M + '1': + constraints: mem=3072M + '2': + constraints: mem=3072M + '3': + '4': + # Note that holding memcached at focal as it's not available at jammy yet. + series: focal + '5': + '6': + '7': + '8': + '9': + '10': + '11': + '12': + '13': + '14': + '15': + '16': + '17': + '18': + '19': + series: focal + +applications: + + keystone-mysql-router: + charm: ch:mysql-router + channel: latest/edge + glance-mysql-router: + charm: ch:mysql-router + channel: latest/edge + nova-cloud-controller-mysql-router: + charm: ch:mysql-router + channel: latest/edge + placement-mysql-router: + charm: ch:mysql-router + channel: latest/edge + gnocchi-mysql-router: + charm: ch:mysql-router + channel: latest/edge + + mysql-innodb-cluster: + charm: ch:mysql-innodb-cluster + num_units: 3 + options: + source: *openstack-origin + to: + - '0' + - '1' + - '2' + channel: latest/edge + + rabbitmq-server: + charm: ch:rabbitmq-server + num_units: 1 + options: + source: *openstack-origin + to: + - '3' + channel: latest/edge + + memcached: + charm: ch:memcached + num_units: 1 + # Note that holding memcached at focal as it's not available at jammy yet. 
+ series: focal + to: + - '4' + + ceph-osd: + charm: ch:ceph-osd + num_units: 3 + storage: + osd-devices: 'cinder,10G' + options: + source: *openstack-origin + to: + - '5' + - '6' + - '7' + channel: latest/edge + + ceph-mon: + charm: ch:ceph-mon + num_units: 3 + options: + source: *openstack-origin + to: + - '8' + - '9' + - '10' + channel: latest/edge + + keystone: + charm: ch:keystone + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '11' + channel: latest/edge + + glance: + charm: ch:glance + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '12' + channel: latest/edge + + nova-cloud-controller: + charm: ch:nova-cloud-controller + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '13' + channel: latest/edge + + placement: + charm: ch:placement + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '14' + channel: latest/edge + + nova-compute: + charm: ch:nova-compute + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '15' + channel: latest/edge + + ceilometer: + charm: ch:ceilometer + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '16' + channel: latest/edge + + gnocchi: + charm: ch:gnocchi + num_units: 1 + options: + openstack-origin: *openstack-origin + to: + - '17' + channel: latest/edge + + ceilometer-agent: + charm: ch:ceilometer-agent + channel: latest/edge + + cloudkitty: + charm: ../../cloudkitty.charm + num_units: 1 + to: + - '18' + + mysql: + charm: ch:mysql + series: focal + channel: latest/edge + num_units: 1 + to: + - '19' + +relations: + + - - 'cloudkitty' + - 'mysql' + + - - 'cloudkitty' + - 'keystone' + + - - 'cloudkitty' + - 'gnocchi' + + - - 'cloudkitty' + - 'rabbitmq-server' + + - - 'ceilometer:amqp' + - 'rabbitmq-server:amqp' + + - - 'ceilometer:identity-notifications' + - 'keystone:identity-notifications' + + - - 'ceilometer:ceilometer-service' + - 'ceilometer-agent:ceilometer-service' + + - - 'ceilometer:metric-service' + - 'gnocchi:metric-service' + + - - 'ceilometer:identity-credentials' + - 'keystone:identity-credentials' + + - - 'keystone:shared-db' + - 'keystone-mysql-router:shared-db' + - - 'keystone-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'ceilometer-agent:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:nova-ceilometer' + - 'ceilometer-agent:nova-ceilometer' + + - - 'nova-compute:amqp' + - 'rabbitmq-server:amqp' + + - - 'glance:identity-service' + - 'keystone:identity-service' + + - - 'glance:shared-db' + - 'glance-mysql-router:shared-db' + - - 'glance-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'glance:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-compute:image-service' + - 'glance:image-service' + + - - 'nova-cloud-controller:shared-db' + - 'nova-cloud-controller-mysql-router:shared-db' + - - 'nova-cloud-controller-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'nova-cloud-controller:amqp' + - 'rabbitmq-server:amqp' + + - - 'nova-cloud-controller:identity-service' + - 'keystone:identity-service' + + - - 'nova-cloud-controller:cloud-compute' + - 'nova-compute:cloud-compute' + + - - 'nova-cloud-controller:image-service' + - 'glance:image-service' + + - - 'placement:shared-db' + - 'placement-mysql-router:shared-db' + - - 'placement-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'placement:identity-service' + - 'keystone:identity-service' + + - - 'placement:placement' + - 'nova-cloud-controller:placement' + + - - 
'ceph-mon:osd' + - 'ceph-osd:mon' + + - - 'gnocchi:shared-db' + - 'gnocchi-mysql-router:shared-db' + - - 'gnocchi-mysql-router:db-router' + - 'mysql-innodb-cluster:db-router' + + - - 'gnocchi:storage-ceph' + - 'ceph-mon:client' + + - - 'gnocchi:coordinator-memcached' + - 'memcached:cache' + + - - 'gnocchi:identity-service' + - 'keystone:identity-service' diff --git a/tests/tests.yaml b/tests/tests.yaml new file mode 100644 index 0000000..33e651c --- /dev/null +++ b/tests/tests.yaml @@ -0,0 +1,28 @@ +charm_name: cloudkitty + +smoke_bundles: +- jammy-yoga + +gate_bundles: +- jammy-yoga + +dev_bundles: +- jammy-yoga + +configure: +- zaza.openstack.charm_tests.ceilometer.setup.basic_setup + +tests: +- zaza.openstack.charm_tests.cloudkitty.tests.CloudkittyTest + +target_deploy_status: + mysql: + num-expected-units: 1 + workload-status: active + workload-status-message-prefix: "" + ceilometer: + workload-status: blocked + workload-status-message-prefix: "Run the ceilometer-upgrade action on the leader to initialize ceilometer and gnocchi" + +tests_options: + force_deploy: [] diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..bbf8ada --- /dev/null +++ b/tox.ini @@ -0,0 +1,141 @@ +# Operator charm (with zaza): tox.ini + +[tox] +envlist = pep8,py3 +skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +# * It is necessary to declare setuptools as a dependency otherwise tox will +# fail very early at not being able to load it. The version pinning is in +# line with `pip.sh`. 
+requires = pip < 20.3 + virtualenv < 20.0 + setuptools < 50.0.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +commands = stestr run --slowest {posargs} +allowlist_externals = + git + bash + charmcraft + rename.sh +passenv = HOME TERM CS_* OS_* TEST_* +deps = -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py39] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py310] +basepython = python3.10 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + lib/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +# NOTE(lourot): charmcraft 1.0.0 used to generate +# nova-compute-nvidia-vgpu.charm, which is the behaviour expected by OSCI. +# However charmcraft 1.2.1 now generates +# nova-compute-nvidia-vgpu_ubuntu-20.04-amd64.charm instead. In order to keep +# the old behaviour we rename the file at the end. +commands = + charmcraft clean + charmcraft -v pack + {toxinidir}/rename.sh + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,W503,W504,E902 diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py new file mode 100644 index 0000000..e163492 --- /dev/null +++ b/unit_tests/__init__.py @@ -0,0 +1,2 @@ +import ops.testing +ops.testing.SIMULATE_CAN_CONNECT = True diff --git a/unit_tests/test_charm.py b/unit_tests/test_charm.py new file mode 100644 index 0000000..c40695b --- /dev/null +++ b/unit_tests/test_charm.py @@ -0,0 +1,130 @@ +# Copyright 2021 OpenStack Charmers +# See LICENSE file for licensing details. 
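+#
+# These unit tests are typically run with `tox -e py3` (which drives
+# stestr against unit_tests/) or via the ./run_tests helper script.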
+# +# Learn more about testing at: https://juju.is/docs/sdk/testing + + +import sys + +sys.path.append('lib') +sys.path.append('src') +sys.path.append('unit_tests') + +import charm +import unittest +import test_utils + +from unittest.mock import ( + patch, + call, + Mock +) +from ops.testing import Harness + + +class TestCloudkittyCharm(charm.CloudkittyCharm): + """ + Workaround until 'network-get' call gets mocked + See https://github.com/canonical/operator/issues/456 + See https://github.com/canonical/operator/issues/222 + """ + @property + def host(self): + return '10.0.0.10' + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(TestCloudkittyCharm) + + self.harness.set_leader(True) + self.harness.disable_hooks() + + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + @patch('charmhelpers.core.host.mkdir') + @patch('charmhelpers.core.host.write_file') + def assertContent(self, expected_entries, _write_file, _mkdir): + # check rendered content + content = self.harness.charm._render_config(Mock()) + for entry in expected_entries: + self.assertIn(entry, content) + + @patch('ops_openstack.core.apt_update') + @patch('ops_openstack.core.apt_install') + def test_on_install(self, _install, _update): + self.harness.charm.on_install(Mock()) + _update.assert_called_with(fatal=True) + _install.assert_called_with(TestCloudkittyCharm.PACKAGES, fatal=True) + + def test_config_changed(self): + # change application config + self.harness.update_config({'debug': True}) + + # check rendered content + self.assertContent(['debug = True']) + + def test_identity_service_relation(self): + # add identity-service relation + test_utils.add_complete_identity_relation(self.harness) + + # check rendered content + expected_entries = [ + 'auth_protocol = http', + 'auth_uri = http://keystone.local:5000/v3', + 'auth_url = http://keystone.local:12345/v3', + 'project_domain_name = servicedom', + 'user_domain_name = servicedom', + 'identity_uri = http://keystone.local:5000/v3', + 'project_name = svcproj1', + 'username = svcuser1', + 'password = svcpass1', + 'region_name = RegionOne' + ] + self.assertContent(expected_entries) + + def test_database_relation(self): + # add database relation + test_utils.add_complete_database_relation(self.harness) + + # check rendered content + expected_entries = [ + 'mysql+pymysql://dbuser:strongpass@juju-unit-1:3306/cloudkitty' + ] + + self.assertContent(expected_entries) + + @patch('subprocess.check_call', autospec=True) + def test_database_migration(self, _check_call): + self.harness.charm._bootstrap_db() + + calls = [ + call(['cloudkitty-storage-init']), + call(['cloudkitty-dbsync', 'upgrade']) + ] + _check_call.assert_has_calls(calls) + + def test_gnocchi_relation(self): + test_utils.add_complete_metric_relation(self.harness) + + # check rendered content + expected_entries = [ + 'gnocchi_endpoint = http://10.0.0.1:8041' + ] + + self.assertContent(expected_entries) + + def test_rabbitmq_relation(self): + test_utils.add_complete_rabbitmq_relation(self.harness) + + # check rendered content + url = 'rabbit://cloudkitty:strong_password@10.0.0.1:5672/cloudkitty' + + expected_entries = [ + '[oslo_messaging_notifications]', + 'driver = messagingv2', + 'transport_url = ' + url, + ] + + self.assertContent(expected_entries) diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py new file mode 100644 index 0000000..0989c9b --- /dev/null +++ b/unit_tests/test_utils.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module containing shared code to be used in a charms units tests.""" + +from ops.testing import Harness + + +def add_base_identity_service_relation(harness: Harness) -> str: + """Add identity-service relation.""" + rel_id = harness.add_relation("identity-service", "keystone") + harness.add_relation_unit(rel_id, "keystone/0") + harness.update_relation_data( + rel_id, "keystone/0", {"ingress-address": "10.0.0.33"} + ) + return rel_id + + +def add_identity_service_relation_response( + harness: Harness, rel_id: str +) -> None: + """Add id service data to identity-service relation.""" + harness.update_relation_data( + rel_id, + "keystone", + { + "admin-domain-id": "admindomid1", + "admin-project-id": "adminprojid1", + "admin-user-id": "adminuserid1", + "api-version": "3", + "auth-host": "keystone.local", + "auth-port": "12345", + "auth-protocol": "http", + "internal-host": "keystone.internal", + "internal-port": "5000", + "internal-protocol": "http", + "service-domain-name": "servicedom", + "service-domain_id": "svcdomid1", + "service-host": "keystone.service", + "service-password": "svcpass1", + "service-port": "5000", + "service-protocol": "http", + "service-project-name": "svcproj1", + "service-project-id": "svcprojid1", + "service-user-name": "svcuser1", + }, + ) + + +def add_complete_identity_relation(harness: Harness) -> None: + """Add complete Identity relation.""" + rel_id = add_base_identity_service_relation(harness) + add_identity_service_relation_response( + harness, + rel_id) + return rel_id + + +def add_base_database_service_relation(harness: Harness) -> str: + """Add database relation.""" + rel_id = harness.add_relation("database", "mysql") + harness.add_relation_unit(rel_id, "mysql/0") + harness.update_relation_data( + rel_id, "mysql/0", {"ingress-address": "10.0.0.33"} + ) + return rel_id + + +def add_database_service_relation_response( + harness: Harness, rel_id: str +) -> None: + """Add database data to database relation.""" + harness.update_relation_data( + rel_id, + "mysql", + { + 'endpoints': 'juju-unit-1:3306', + 'password': 'strongpass', + 'read-only-endpoints': 'juju-unit-2:3306', + 'username': 'dbuser', + 'version': '8.0.30-0ubuntu0.20.04.2' + }, + ) + + +def add_complete_database_relation(harness: Harness) -> None: + """Add complete Database relation.""" + rel_id = add_base_database_service_relation(harness) + add_database_service_relation_response( + harness, + rel_id) + return rel_id + + +def add_base_metric_service_relation(harness: Harness) -> int: + """Add metric-service relation.""" + rel_id = harness.add_relation("metric-service", "gnocchi") + harness.add_relation_unit(rel_id, "gnocchi/0") + harness.update_relation_data( + rel_id, "gnocchi/0", { + "egress-subnets": "10.0.0.1/32", + "ingress-address": "10.0.0.1", + "private-address": "10.0.0.1", + } + ) + return rel_id + + +def add_metric_service_relation_response( + harness: Harness, rel_id: str +) -> None: + """Add gnocchi data to metric-service relation.""" + 
harness.update_relation_data( + rel_id, "gnocchi/0", { + "gnocchi_url": "http://10.0.0.1:8041", + } + ) + + +def add_complete_metric_relation(harness: Harness) -> int: + """Add complete metric-service relation.""" + rel_id = add_base_metric_service_relation(harness) + add_metric_service_relation_response(harness, rel_id) + return rel_id + + +def add_base_rabbitmq_relation(harness: Harness) -> int: + """Add rabbitmq relation.""" + rel_id = harness.add_relation("amqp", "rabbitmq-server") + harness.add_relation_unit(rel_id, "rabbitmq-server/0") + harness.update_relation_data( + rel_id, "rabbitmq-server/0", { + "egress-subnets": "10.0.0.1/32", + "ingress-address": "10.0.0.1", + "private-address": "10.0.0.1", + } + ) + return rel_id + + +def add_rabbitmq_relation_response( + harness: Harness, rel_id: str +) -> None: + """Add rabbitmq data to amqp relation.""" + harness.update_relation_data( + rel_id, "rabbitmq-server/0", { + "user": "cloudkitty", + "vhost": "cloudkitty", + "password": "strong_password", + "hostname": "10.0.0.1", + } + ) + + +def add_complete_rabbitmq_relation(harness: Harness) -> int: + """Add complete rabbitmq relation.""" + rel_id = add_base_rabbitmq_relation(harness) + add_rabbitmq_relation_response(harness, rel_id) + return rel_id