From 9f720ecdd7eea15e69b781e44d861623460090e6 Mon Sep 17 00:00:00 2001
From: "Mehdi Abaakouk (sileht)"
Date: Mon, 5 Jun 2017 17:04:30 +0000
Subject: [PATCH] Revert "Revert "Retire project""

This reverts commit bc81f1c699923e44931398eb7e6c873a16dfcb6b.

Change-Id: I7a7bc3deeadd094d7a42b47b16cde2e8a8805a24
---
 .gitignore | 13 -
 .gitreview | 4 -
 .testr.conf | 5 -
 .travis.yml | 44 -
 LICENSE | 176 --
 MANIFEST.in | 1 -
 README | 10 +
 README.rst | 14 -
 bindep.txt | 10 -
 devstack/README.rst | 15 -
 devstack/apache-gnocchi.template | 10 -
 devstack/apache-ported-gnocchi.template | 15 -
 devstack/gate/gate_hook.sh | 59 -
 devstack/gate/post_test_hook.sh | 78 -
 devstack/plugin.sh | 474 ----
 devstack/settings | 65 -
 doc/source/_static/gnocchi-icon-source.png | Bin 12573 -> 0 bytes
 doc/source/_static/gnocchi-icon.ico | Bin 370334 -> 0 bytes
 doc/source/_static/gnocchi-logo.png | Bin 92830 -> 0 bytes
 doc/source/architecture.png | Bin 60234 -> 0 bytes
 doc/source/architecture.rst | 82 -
 doc/source/client.rst | 13 -
 doc/source/collectd.rst | 14 -
 doc/source/conf.py | 197 --
 doc/source/glossary.rst | 33 -
 doc/source/grafana-screenshot.png | Bin 82601 -> 0 bytes
 doc/source/grafana.rst | 52 -
 doc/source/index.rst | 70 -
 doc/source/install.rst | 191 --
 doc/source/nagios.rst | 19 -
 doc/source/releasenotes/2.1.rst | 6 -
 doc/source/releasenotes/2.2.rst | 6 -
 doc/source/releasenotes/3.0.rst | 6 -
 doc/source/releasenotes/3.1.rst | 6 -
 doc/source/releasenotes/index.rst | 11 -
 doc/source/releasenotes/unreleased.rst | 5 -
 doc/source/rest.j2 | 586 -----
 doc/source/rest.yaml | 749 -------
 doc/source/running.rst | 246 ---
 doc/source/statsd.rst | 43 -
 gnocchi/__init__.py | 0
 gnocchi/aggregates/__init__.py | 50 -
 gnocchi/aggregates/moving_stats.py | 145 --
 gnocchi/archive_policy.py | 247 ---
 gnocchi/carbonara.py | 980 ---------
 gnocchi/cli.py | 317 ---
 gnocchi/exceptions.py | 19 -
 gnocchi/genconfig.py | 29 -
 gnocchi/gendoc.py | 178 --
 gnocchi/gnocchi-config-generator.conf | 11 -
 gnocchi/indexer/__init__.py | 411 ----
 gnocchi/indexer/alembic/alembic.ini | 3 -
 gnocchi/indexer/alembic/env.py | 90 -
 gnocchi/indexer/alembic/script.py.mako | 36 -
 ...ed97e5b3_add_tablename_to_resource_type.py | 54 -
 ...1ac1f4c_add_original_resource_id_column.py | 40 -
 .../versions/1c98ac614015_initial_base.py | 267 ---
 ...a63d3d186_original_resource_id_not_null.py | 66 -
 ...c2_allow_volume_display_name_to_be_null.py | 41 -
 ...205ff_add_updating_resource_type_states.py | 89 -
 .../2e0b912062d1_drop_useless_enum.py | 39 -
 .../34c517bcc2dd_shorter_foreign_key.py | 91 -
 ...2b8e_create_instance_disk_and_instance_.py | 103 -
 ...397987e38570_no_more_slash_and_reencode.py | 184 --
 ...9b7d449d46a_create_metric_status_column.py | 49 -
 .../40c6aae14c3f_ck_started_before_ended.py | 39 -
 ...e25f8_alter_flavorid_from_int_to_string.py | 38 -
 ...469b308577a9_allow_image_ref_to_be_null.py | 41 -
 .../5c4f93e5bb4_mysql_float_to_timestamp.py | 77 -
 .../62a8dfb139bb_change_uuid_to_string.py | 249 ---
 ...7e6f9d542f8b_resource_type_state_column.py | 43 -
 ...828c16f70cce_create_resource_type_table.py | 85 -
 ...6189b9eb_migrate_legacy_resources_to_db.py | 48 -
 .../versions/9901e5ea4b6e_create_host.py | 127 --
 .../a54c57ada3f5_removes_useless_indexes.py | 72 -
 .../aba5a217ca9b_merge_created_in_creator.py | 53 -
 ...c62df18bf4ee_add_unit_column_for_metric.py | 38 -
 ...7c22ab0_add_attributes_to_resource_type.py | 38 -
 .../ed9c6ddc5c35_fix_host_foreign_key.py | 53 -
 .../versions/f7d44b47928_uuid_to_binary.py | 89 -
 ...beec0b0_migrate_legacy_resources_to_db2.py | 65 -
 gnocchi/indexer/sqlalchemy.py | 1235 -----------
 gnocchi/indexer/sqlalchemy_base.py | 443 ----
 gnocchi/indexer/sqlalchemy_extension.py | 56 -
 .../indexer/sqlalchemy_legacy_resources.py | 78 -
 gnocchi/json.py | 58 -
 gnocchi/opts.py | 167 --
 gnocchi/resource_type.py | 266 ---
 gnocchi/rest/__init__.py | 1785 ---------------
 gnocchi/rest/api-paste.ini | 46 -
 gnocchi/rest/app.py | 143 --
 gnocchi/rest/app.wsgi | 29 -
 gnocchi/rest/auth_helper.py | 125 --
 gnocchi/rest/policy.json | 42 -
 gnocchi/service.py | 93 -
 gnocchi/statsd.py | 195 --
 gnocchi/storage/__init__.py | 372 ----
 gnocchi/storage/_carbonara.py | 571 -----
 gnocchi/storage/ceph.py | 203 --
 gnocchi/storage/common/__init__.py | 0
 gnocchi/storage/common/ceph.py | 100 -
 gnocchi/storage/common/redis.py | 129 --
 gnocchi/storage/common/s3.py | 81 -
 gnocchi/storage/common/swift.py | 70 -
 gnocchi/storage/file.py | 151 --
 gnocchi/storage/incoming/__init__.py | 64 -
 gnocchi/storage/incoming/_carbonara.py | 138 --
 gnocchi/storage/incoming/ceph.py | 225 --
 gnocchi/storage/incoming/file.py | 165 --
 gnocchi/storage/incoming/redis.py | 85 -
 gnocchi/storage/incoming/s3.py | 177 --
 gnocchi/storage/incoming/swift.py | 114 -
 gnocchi/storage/redis.py | 114 -
 gnocchi/storage/s3.py | 221 --
 gnocchi/storage/swift.py | 185 --
 gnocchi/tempest/__init__.py | 0
 gnocchi/tempest/config.py | 33 -
 gnocchi/tempest/plugin.py | 42 -
 gnocchi/tempest/scenario/__init__.py | 110 -
 gnocchi/tests/__init__.py | 0
 gnocchi/tests/base.py | 335 ---
 gnocchi/tests/functional/__init__.py | 0
 gnocchi/tests/functional/fixtures.py | 189 --
 .../tests/functional/gabbits/aggregation.yaml | 341 ---
 .../functional/gabbits/archive-rule.yaml | 197 --
 gnocchi/tests/functional/gabbits/archive.yaml | 568 -----
 gnocchi/tests/functional/gabbits/async.yaml | 71 -
 gnocchi/tests/functional/gabbits/base.yaml | 168 --
 .../functional/gabbits/batch-measures.yaml | 295 ---
 gnocchi/tests/functional/gabbits/cors.yaml | 21 -
 .../tests/functional/gabbits/healthcheck.yaml | 7 -
 gnocchi/tests/functional/gabbits/history.yaml | 160 --
 .../gabbits/metric-granularity.yaml | 60 -
 .../tests/functional/gabbits/metric-list.yaml | 142 --
 .../gabbits/metric-timestamp-format.yaml | 60 -
 gnocchi/tests/functional/gabbits/metric.yaml | 331 ---
 .../tests/functional/gabbits/pagination.yaml | 506 -----
 .../gabbits/resource-aggregation.yaml | 169 --
 .../functional/gabbits/resource-type.yaml | 772 -------
 .../tests/functional/gabbits/resource.yaml | 1106 ----------
 .../functional/gabbits/search-metric.yaml | 143 --
 gnocchi/tests/functional/gabbits/search.yaml | 89 -
 .../functional/gabbits/transformedids.yaml | 184 --
 gnocchi/tests/functional/test_gabbi.py | 35 -
 gnocchi/tests/functional/test_gabbi_prefix.py | 34 -
 gnocchi/tests/functional_live/__init__.py | 0
 .../tests/functional_live/gabbits/live.yaml | 739 -------
 .../gabbits/search-resource.yaml | 275 ---
 .../tests/functional_live/test_gabbi_live.py | 48 -
 gnocchi/tests/indexer/__init__.py | 0
 gnocchi/tests/indexer/sqlalchemy/__init__.py | 0
 .../indexer/sqlalchemy/test_migrations.py | 92 -
 gnocchi/tests/test_aggregates.py | 116 -
 gnocchi/tests/test_archive_policy.py | 98 -
 gnocchi/tests/test_bin.py | 24 -
 gnocchi/tests/test_carbonara.py | 1292 -----------
 gnocchi/tests/test_indexer.py | 1245 -----------
 gnocchi/tests/test_rest.py | 1915 -----------------
 gnocchi/tests/test_statsd.py | 160 --
 gnocchi/tests/test_storage.py | 1001 ---------
 gnocchi/tests/test_utils.py | 105 -
 gnocchi/tests/utils.py | 19 -
 gnocchi/utils.py | 299 ---
 releasenotes/notes/.placeholder | 0
 ...arameter-granularity-7f22c677dc1b1238.yaml | 4 -
 .../archive_policy_bool-9313cae7122c4a2f.yaml | 5 -
 .../auth_type_option-c335b219afba5569.yaml | 5 -
 .../auth_type_pluggable-76a3c73cac8eec6a.yaml | 5 -
 ...ll-cross-aggregation-2de54c7c30b2eb67.yaml | 6 -
 ...sures_create_metrics-f73790a8475ad628.yaml | 5 -
 .../notes/ceph-omap-34e069dfb3df764d.yaml | 5 -
 .../ceph-read-async-ca2f7512c6842adb.yaml | 4 -
 .../notes/creator_field-6b715c917f6afc93.yaml | 6 -
 .../delete-resources-f10d21fc02f53f16.yaml | 3 -
 .../deprecate-noauth-01b7e961d9a17e9e.yaml | 4 -
 .../dynamic-resampling-b5e545b1485c152f.yaml | 6 -
 .../fnmatch-python-2.7-c524ce1e1b238b0a.yaml | 5 -
 .../notes/forbid-slash-b3ec2bc77cc34b49.yaml | 7 -
 ...chi_config_generator-0fc337ba8e3afd5f.yaml | 5 -
 ...althcheck-middleware-81c2f0d02ebdb5cc.yaml | 5 -
 .../incoming-sacks-413f4818882ab83d.yaml | 14 -
 ...ult-archive-policies-455561c027edf4ad.yaml | 5 -
 ...sql_precise_datetime-57f868f3f42302e2.yaml | 4 -
 ...noauth-force-headers-dda926ce83f810e8.yaml | 5 -
 ...auth-keystone-compat-e8f760591d593f07.yaml | 9 -
 .../pecan-debug-removed-1a9dbc4a0a6ad581.yaml | 3 -
 .../notes/redis-driver-299dc443170364bc.yaml | 5 -
 .../notes/reloading-734a639a667c93ee.yaml | 6 -
 ...ceilometer-resources-16da2061d6d3f506.yaml | 3 -
 ...-aggregation-methods-2f5ec059855e17f9.yaml | 5 -
 .../resource-type-patch-8b6a85009db0671c.yaml | 6 -
 ...-required-attributes-f446c220d54c8eb7.yaml | 6 -
 .../s3-bucket-limit-224951bb6a81ddce.yaml | 8 -
 ...stency_check_timeout-a30db3bd07a9a281.yaml | 9 -
 .../notes/s3_driver-4b30122bdbe0385d.yaml | 5 -
 .../storage-engine-v3-b34bd0723abf292f.yaml | 13 -
 .../storage-incoming-586b3e81de8deb4f.yaml | 6 -
 .../swift_keystone_v3-606da8228fc13a32.yaml | 3 -
 ...val-from-2.2-and-3.0-a01fc64ecb39c327.yaml | 4 -
 .../notes/uuid5-change-8a8c467d2b2d4c85.yaml | 12 -
 ...i-script-deprecation-c6753a844ca0b411.yaml | 7 -
 requirements.txt | 24 -
 run-func-tests.sh | 52 -
 run-tests.sh | 31 -
 run-upgrade-tests.sh | 99 -
 setup.cfg | 158 --
 setup.py | 21 -
 tools/duration_perf_analyse.py | 79 -
 tools/duration_perf_test.py | 194 --
 tools/gnocchi-archive-policy-size.py | 49 -
 tools/measures_injector.py | 62 -
 tools/pretty_tox.sh | 16 -
 tools/travis-ci-setup.dockerfile | 41 -
 tox.ini | 139 --
 214 files changed, 10 insertions(+), 30717 deletions(-)
 delete mode 100644 .gitignore
 delete mode 100644 .gitreview
 delete mode 100644 .testr.conf
 delete mode 100644 .travis.yml
 delete mode 100644 LICENSE
 delete mode 100644 MANIFEST.in
 create mode 100644 README
 delete mode 100644 README.rst
 delete mode 100644 bindep.txt
 delete mode 100644 devstack/README.rst
 delete mode 100644 devstack/apache-gnocchi.template
 delete mode 100644 devstack/apache-ported-gnocchi.template
 delete mode 100755 devstack/gate/gate_hook.sh
 delete mode 100755 devstack/gate/post_test_hook.sh
 delete mode 100644 devstack/plugin.sh
 delete mode 100644 devstack/settings
 delete mode 100644 doc/source/_static/gnocchi-icon-source.png
 delete mode 100644 doc/source/_static/gnocchi-icon.ico
 delete mode 100644 doc/source/_static/gnocchi-logo.png
 delete mode 100644 doc/source/architecture.png
 delete mode 100755 doc/source/architecture.rst
 delete mode 100644 doc/source/client.rst
 delete mode 100644 doc/source/collectd.rst
 delete mode 100644 doc/source/conf.py
 delete mode 100644 doc/source/glossary.rst
 delete mode 100644 doc/source/grafana-screenshot.png
 delete mode 100644 doc/source/grafana.rst
 delete mode 100644 doc/source/index.rst
 delete mode 100644 doc/source/install.rst
 delete mode 100644 doc/source/nagios.rst
 delete mode 100644 doc/source/releasenotes/2.1.rst
 delete mode 100644 doc/source/releasenotes/2.2.rst
 delete mode 100644 doc/source/releasenotes/3.0.rst
 delete mode 100644 doc/source/releasenotes/3.1.rst
 delete mode 100644 doc/source/releasenotes/index.rst
 delete mode 100644 doc/source/releasenotes/unreleased.rst
 delete mode 100644 doc/source/rest.j2
 delete mode 100644 doc/source/rest.yaml
 delete mode 100644 doc/source/running.rst
 delete mode 100644 doc/source/statsd.rst
 delete mode 100644 gnocchi/__init__.py
 delete mode 100644 gnocchi/aggregates/__init__.py
 delete mode 100644 gnocchi/aggregates/moving_stats.py
 delete mode 100644 gnocchi/archive_policy.py
 delete mode 100644 gnocchi/carbonara.py
 delete mode 100644 gnocchi/cli.py
 delete mode 100644 gnocchi/exceptions.py
 delete mode 100644 gnocchi/genconfig.py
 delete mode 100644 gnocchi/gendoc.py
 delete mode 100644 gnocchi/gnocchi-config-generator.conf
 delete mode 100644 gnocchi/indexer/__init__.py
 delete mode 100644 gnocchi/indexer/alembic/alembic.ini
 delete mode 100644 gnocchi/indexer/alembic/env.py
 delete mode 100644 gnocchi/indexer/alembic/script.py.mako
 delete mode 100644 gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py
 delete mode 100644 gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py
 delete mode 100644 gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py
 delete mode 100644 gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py
 delete mode 100644 gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py
 delete mode 100644 gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py
 delete mode 100644 gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py
 delete mode 100644 gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py
 delete mode 100644 gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py
 delete mode 100644 gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py
 delete mode 100644 gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py
 delete mode 100644 gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py
 delete mode 100644 gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py
 delete mode 100644 gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py
 delete mode 100644 gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py
 delete mode 100644 gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py
 delete mode 100644 gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py
 delete mode 100644 gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py
 delete mode 100644 gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py
 delete mode 100644 gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py
 delete mode 100644 gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py
 delete mode 100644 gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py
 delete mode 100644 gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py
 delete mode 100644 gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py
 delete mode 100644 gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py
 delete mode 100644 gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py
 delete mode 100644 gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py
 delete mode 100644 gnocchi/indexer/sqlalchemy.py
 delete mode 100644 gnocchi/indexer/sqlalchemy_base.py
 delete mode 100644 gnocchi/indexer/sqlalchemy_extension.py
 delete mode 100644 gnocchi/indexer/sqlalchemy_legacy_resources.py
 delete mode 100644 gnocchi/json.py
 delete mode 100644 gnocchi/opts.py
 delete mode 100644 gnocchi/resource_type.py
 delete mode 100644 gnocchi/rest/__init__.py
 delete mode 100644 gnocchi/rest/api-paste.ini
 delete mode 100644 gnocchi/rest/app.py
 delete mode 100644 gnocchi/rest/app.wsgi
 delete mode 100644 gnocchi/rest/auth_helper.py
 delete mode 100644 gnocchi/rest/policy.json
 delete mode 100644 gnocchi/service.py
 delete mode 100644 gnocchi/statsd.py
 delete mode 100644 gnocchi/storage/__init__.py
 delete mode 100644 gnocchi/storage/_carbonara.py
 delete mode 100644 gnocchi/storage/ceph.py
 delete mode 100644 gnocchi/storage/common/__init__.py
 delete mode 100644 gnocchi/storage/common/ceph.py
 delete mode 100644 gnocchi/storage/common/redis.py
 delete mode 100644 gnocchi/storage/common/s3.py
 delete mode 100644 gnocchi/storage/common/swift.py
 delete mode 100644 gnocchi/storage/file.py
 delete mode 100644 gnocchi/storage/incoming/__init__.py
 delete mode 100644 gnocchi/storage/incoming/_carbonara.py
 delete mode 100644 gnocchi/storage/incoming/ceph.py
 delete mode 100644 gnocchi/storage/incoming/file.py
 delete mode 100644 gnocchi/storage/incoming/redis.py
 delete mode 100644 gnocchi/storage/incoming/s3.py
 delete mode 100644 gnocchi/storage/incoming/swift.py
 delete mode 100644 gnocchi/storage/redis.py
 delete mode 100644 gnocchi/storage/s3.py
 delete mode 100644 gnocchi/storage/swift.py
 delete mode 100644 gnocchi/tempest/__init__.py
 delete mode 100644 gnocchi/tempest/config.py
 delete mode 100644 gnocchi/tempest/plugin.py
 delete mode 100644 gnocchi/tempest/scenario/__init__.py
 delete mode 100644 gnocchi/tests/__init__.py
 delete mode 100644 gnocchi/tests/base.py
 delete mode 100644 gnocchi/tests/functional/__init__.py
 delete mode 100644 gnocchi/tests/functional/fixtures.py
 delete mode 100644 gnocchi/tests/functional/gabbits/aggregation.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/archive-rule.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/archive.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/async.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/base.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/batch-measures.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/cors.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/healthcheck.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/history.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/metric-granularity.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/metric-list.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/metric.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/pagination.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/resource-aggregation.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/resource-type.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/resource.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/search-metric.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/search.yaml
 delete mode 100644 gnocchi/tests/functional/gabbits/transformedids.yaml
 delete mode 100644 gnocchi/tests/functional/test_gabbi.py
 delete mode 100644 gnocchi/tests/functional/test_gabbi_prefix.py
 delete mode 100644 gnocchi/tests/functional_live/__init__.py
 delete mode 100644 gnocchi/tests/functional_live/gabbits/live.yaml
 delete mode 100644 gnocchi/tests/functional_live/gabbits/search-resource.yaml
 delete mode 100644 gnocchi/tests/functional_live/test_gabbi_live.py
 delete mode 100644 gnocchi/tests/indexer/__init__.py
 delete mode 100644 gnocchi/tests/indexer/sqlalchemy/__init__.py
 delete mode 100644 gnocchi/tests/indexer/sqlalchemy/test_migrations.py
 delete mode 100644 gnocchi/tests/test_aggregates.py
 delete mode 100644 gnocchi/tests/test_archive_policy.py
 delete mode 100644 gnocchi/tests/test_bin.py
 delete mode 100644 gnocchi/tests/test_carbonara.py
 delete mode 100644 gnocchi/tests/test_indexer.py
 delete mode 100644 gnocchi/tests/test_rest.py
 delete mode 100644 gnocchi/tests/test_statsd.py
 delete mode 100644 gnocchi/tests/test_storage.py
 delete mode 100644 gnocchi/tests/test_utils.py
 delete mode 100644 gnocchi/tests/utils.py
 delete mode 100644 gnocchi/utils.py
 delete mode 100644 releasenotes/notes/.placeholder
 delete mode 100644 releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml
 delete mode 100644 releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml
 delete mode 100644 releasenotes/notes/auth_type_option-c335b219afba5569.yaml
 delete mode 100644 releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml
 delete mode 100644 releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml
 delete mode 100644 releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml
 delete mode 100644 releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml
 delete mode 100644 releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml
 delete mode 100644 releasenotes/notes/creator_field-6b715c917f6afc93.yaml
 delete mode 100644 releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml
 delete mode 100644 releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml
 delete mode 100644 releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml
 delete mode 100644 releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml
 delete mode 100644 releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml
 delete mode 100644 releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml
 delete mode 100644 releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml
 delete mode 100644 releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml
 delete mode 100644 releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml
 delete mode 100644 releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml
 delete mode 100644 releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml
 delete mode 100644 releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml
 delete mode 100644 releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml
 delete mode 100644 releasenotes/notes/redis-driver-299dc443170364bc.yaml
 delete mode 100644 releasenotes/notes/reloading-734a639a667c93ee.yaml
 delete mode 100644 releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml
 delete mode 100644 releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml
 delete mode 100644 releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml
 delete mode 100644 releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml
 delete mode 100644 releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml
 delete mode 100644 releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml
 delete mode 100644 releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml
 delete mode 100644 releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml
 delete mode 100644 releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml
 delete mode 100644 releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml
 delete mode 100644 releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml
 delete mode 100644 releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml
 delete mode 100644 releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml
 delete mode 100644 requirements.txt
 delete mode 100755 run-func-tests.sh
 delete mode 100755 run-tests.sh
 delete mode 100755 run-upgrade-tests.sh
 delete mode 100644 setup.cfg
 delete mode 100755 setup.py
 delete mode 100644 tools/duration_perf_analyse.py
 delete mode 100644 tools/duration_perf_test.py
 delete mode 100755 tools/gnocchi-archive-policy-size.py
 delete mode 100755 tools/measures_injector.py
 delete mode 100755 tools/pretty_tox.sh
 delete mode 100644 tools/travis-ci-setup.dockerfile
 delete mode 100644 tox.ini

diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 0562170dc..000000000
--- a/.gitignore
+++ /dev/null
@@ -1,13 +0,0 @@
-.testrepository
-*.pyc
-.tox
-*.egg-info
-AUTHORS
-ChangeLog
-etc/gnocchi/gnocchi.conf
-doc/build
-doc/source/rest.rst
-releasenotes/build
-cover
-.coverage
-dist
diff --git a/.gitreview b/.gitreview
deleted file mode 100644
index e4b8477df..000000000
--- a/.gitreview
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/gnocchi.git
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index c274843c2..000000000
--- a/.testr.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} ${PYTHON:-python} -m subunit.run discover -t . ${OS_TEST_PATH:-gnocchi/tests} $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
-group_regex=(gabbi\.suitemaker\.test_gabbi((_prefix_|_live_|_)([^_]+)))_
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 72b03e196..000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-language: python
-sudo: required
-
-services:
-  - docker
-
-cache:
-  directories:
-    - ~/.cache/pip
-env:
-  - TARGET: bashate
-  - TARGET: pep8
-  - TARGET: docs
-  - TARGET: docs-gnocchi.xyz
-
-  - TARGET: py27-mysql-ceph-upgrade-from-3.1
-  - TARGET: py35-postgresql-file-upgrade-from-3.1
-
-  - TARGET: py27-mysql
-  - TARGET: py35-mysql
-  - TARGET: py27-postgresql
-  - TARGET: py35-postgresql
-
-before_script:
-# Travis: we need to fetch all tags/branches for the documentation target
-  - case $TARGET in
-    docs*)
-      git fetch origin $(git ls-remote -q | sed -n '/refs\/heads/s,.*refs/heads\(.*\),:remotes/origin\1,gp') ;
-      git fetch --tags ;
-      git fetch --unshallow ;
-      ;;
-    esac
-
-  - docker build --tag gnocchi-ci --file=tools/travis-ci-setup.dockerfile .
-script:
-  - docker run -v ~/.cache/pip:/home/tester/.cache/pip -v $(pwd):/home/tester/src gnocchi-ci tox -e ${TARGET}
-
-notifications:
-  email: false
-  irc:
-    on_success: change
-    on_failure: always
-    channels:
-      - "irc.freenode.org#gnocchi"
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 68c771a09..000000000
--- a/LICENSE
+++ /dev/null
@@ -1,176 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 8f248e6e1..000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-include etc/gnocchi/gnocchi.conf
diff --git a/README b/README
new file mode 100644
index 000000000..90ebc4718
--- /dev/null
+++ b/README
@@ -0,0 +1,10 @@
+This project has been moved to https://github.com/gnocchixyz/gnocchi
+
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+
+For any further questions, please email
+openstack-dev@lists.openstack.org or join #openstack-dev or #gnocchi on
+Freenode.
diff --git a/README.rst b/README.rst
deleted file mode 100644
index ca172f4d0..000000000
--- a/README.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-===============================
- Gnocchi - Metric as a Service
-===============================
-
-.. image:: doc/source/_static/gnocchi-logo.png
-
-Gnocchi is a multi-tenant timeseries, metrics and resources database. It
-provides an `HTTP REST`_ interface to create and manipulate the data. It is
-designed to store metrics at a very large scale while providing access to
-metrics and resources information and history.
-
-You can read the full documentation online at http://gnocchi.xyz.
-
-.. _`HTTP REST`: https://en.wikipedia.org/wiki/Representational_state_transfer
diff --git a/bindep.txt b/bindep.txt
deleted file mode 100644
index 9d9b91a5e..000000000
--- a/bindep.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-libpq-dev [platform:dpkg]
-postgresql [platform:dpkg]
-mysql-client [platform:dpkg]
-mysql-server [platform:dpkg]
-build-essential [platform:dpkg]
-libffi-dev [platform:dpkg]
-librados-dev [platform:dpkg]
-ceph [platform:dpkg]
-redis-server [platform:dpkg]
-liberasurecode-dev [platform:dpkg]
diff --git a/devstack/README.rst b/devstack/README.rst
deleted file mode 100644
index 1d6c9ed04..000000000
--- a/devstack/README.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-============================
-Enabling Gnocchi in DevStack
-============================
-
-1. Download DevStack::
-
-    git clone https://git.openstack.org/openstack-dev/devstack.git
-    cd devstack
-
-2. Add this repo as an external repository in ``local.conf`` file::
-
-    [[local|localrc]]
-    enable_plugin gnocchi https://git.openstack.org/openstack/gnocchi
-
-3. Run ``stack.sh``.
diff --git a/devstack/apache-gnocchi.template b/devstack/apache-gnocchi.template
deleted file mode 100644
index bc2887555..000000000
--- a/devstack/apache-gnocchi.template
+++ /dev/null
@@ -1,10 +0,0 @@
-
-WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV%
-WSGIProcessGroup gnocchi
-WSGIScriptAlias %SCRIPT_NAME% %WSGI%
-<Location %SCRIPT_NAME%>
-    WSGIProcessGroup gnocchi
-    WSGIApplicationGroup %{GLOBAL}
-</Location>
-
-WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/apache-ported-gnocchi.template b/devstack/apache-ported-gnocchi.template
deleted file mode 100644
index 2a56fa8d4..000000000
--- a/devstack/apache-ported-gnocchi.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %GNOCCHI_PORT%
-
-<VirtualHost *:%GNOCCHI_PORT%>
-    WSGIDaemonProcess gnocchi lang='en_US.UTF-8' locale='en_US.UTF-8' user=%USER% display-name=%{GROUP} processes=%APIWORKERS% threads=32 %VIRTUALENV%
-    WSGIProcessGroup gnocchi
-    WSGIScriptAlias / %WSGI%
-    WSGIApplicationGroup %{GLOBAL}
-    <IfVersion >= 2.4>
-        ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/gnocchi.log
-    CustomLog /var/log/%APACHE_NAME%/gnocchi-access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/gate/gate_hook.sh b/devstack/gate/gate_hook.sh
deleted file mode 100755
index c01d37a03..000000000
--- a/devstack/gate/gate_hook.sh
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside gate_hook function in devstack gate.
-
-STORAGE_DRIVER="$1"
-SQL_DRIVER="$2"
-
-ENABLED_SERVICES="key,gnocchi-api,gnocchi-metricd,tempest,"
-
-# Use efficient wsgi web server
-DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_DEPLOY=uwsgi'
-DEVSTACK_LOCAL_CONFIG+=$'\nexport KEYSTONE_DEPLOY=uwsgi'
-
-export DEVSTACK_GATE_INSTALL_TESTONLY=1
-export DEVSTACK_GATE_NO_SERVICES=1
-export DEVSTACK_GATE_TEMPEST=1
-export DEVSTACK_GATE_TEMPEST_NOTESTS=1
-export DEVSTACK_GATE_EXERCISES=0
-export KEEP_LOCALRC=1
-
-case $STORAGE_DRIVER in
-    file)
-        DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=file'
-        ;;
-    swift)
-        ENABLED_SERVICES+="s-proxy,s-account,s-container,s-object,"
-        DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=swift'
-        # FIXME(sileht): use mod_wsgi as workaround for LP#1508424
-        DEVSTACK_GATE_TEMPEST+=$'\nexport SWIFT_USE_MOD_WSGI=True'
-        ;;
-    ceph)
-        DEVSTACK_LOCAL_CONFIG+=$'\nexport GNOCCHI_STORAGE_BACKEND=ceph'
-        ;;
-esac
-
-
-# default to mysql
-case $SQL_DRIVER in
-    postgresql)
-        export DEVSTACK_GATE_POSTGRES=1
-        ;;
-esac
-
-export ENABLED_SERVICES
-export DEVSTACK_LOCAL_CONFIG
-
-$BASE/new/devstack-gate/devstack-vm-gate.sh
diff --git a/devstack/gate/post_test_hook.sh b/devstack/gate/post_test_hook.sh
deleted file mode 100755
index f4a890860..000000000
--- a/devstack/gate/post_test_hook.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This script is executed inside post_test_hook function in devstack gate.
-
-source $BASE/new/devstack/openrc admin admin
-
-set -e
-
-function generate_testr_results {
-    if [ -f .testrepository/0 ]; then
-        sudo /usr/os-testr-env/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
-        sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
-        sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
-        sudo gzip -9 $BASE/logs/testrepository.subunit
-        sudo gzip -9 $BASE/logs/testr_results.html
-        sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
-        sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
-    fi
-}
-
-set -x
-
-export GNOCCHI_DIR="$BASE/new/gnocchi"
-sudo chown -R stack:stack $GNOCCHI_DIR
-cd $GNOCCHI_DIR
-
-openstack catalog list
-
-export GNOCCHI_SERVICE_TOKEN=$(openstack token issue -c id -f value)
-export GNOCCHI_ENDPOINT=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}')
-export GNOCCHI_AUTHORIZATION=""  # Temporarily set to transition to the new functional testing
-
-curl -X GET ${GNOCCHI_ENDPOINT}/v1/archive_policy -H "Content-Type: application/json"
-
-sudo gnocchi-upgrade
-
-# Just ensure tools still work
-sudo -E -H -u stack $GNOCCHI_DIR/tools/measures_injector.py --metrics 1 --batch-of-measures 2 --measures-per-batch 2
-
-# NOTE(sileht): on swift job permissions are wrong, I don't know why
-sudo chown -R tempest:stack $BASE/new/tempest
-sudo chown -R tempest:stack $BASE/data/tempest
-
-# Run tests with tempest
-cd $BASE/new/tempest
-set +e
-sudo -H -u tempest OS_TEST_TIMEOUT=$TEMPEST_OS_TEST_TIMEOUT tox -eall-plugin -- gnocchi --concurrency=$TEMPEST_CONCURRENCY
-TEMPEST_EXIT_CODE=$?
-set -e
-if [[ $TEMPEST_EXIT_CODE != 0 ]]; then
-    # Collect and parse result
-    generate_testr_results
-    exit $TEMPEST_EXIT_CODE
-fi
-
-# Run tests with tox
-cd $GNOCCHI_DIR
-echo "Running gnocchi functional test suite"
-set +e
-sudo -E -H -u stack tox -epy27-gate
-EXIT_CODE=$?
-set -e
-
-# Collect and parse result
-generate_testr_results
-exit $EXIT_CODE
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
deleted file mode 100644
index e1ef90b4f..000000000
--- a/devstack/plugin.sh
+++ /dev/null
@@ -1,474 +0,0 @@
-# Gnocchi devstack plugin
-# Install and start **Gnocchi** service
-
-# To enable Gnocchi service, add the following to localrc:
-#
-#   enable_plugin gnocchi https://github.com/openstack/gnocchi master
-#
-# This will turn on both gnocchi-api and gnocchi-metricd services.
-# If you don't want one of those (you do) you can use the
-# disable_service command in local.conf.
-
-# Dependencies:
-#
-# - functions
-# - ``functions``
-# - ``DEST``, ``STACK_USER`` must be defined
-# - ``APACHE_NAME`` for wsgi
-# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# - ``SERVICE_HOST``
-# - ``OS_AUTH_URL``, ``KEYSTONE_SERVICE_URI`` for auth in api
-
-# stack.sh
-# ---------
-# - install_gnocchi
-# - configure_gnocchi
-# - init_gnocchi
-# - start_gnocchi
-# - stop_gnocchi
-# - cleanup_gnocchi
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set -o xtrace
-
-
-if [ -z "$GNOCCHI_DEPLOY" ]; then
-    # Default
-    GNOCCHI_DEPLOY=simple
-
-    # Fallback to common wsgi devstack configuration
-    if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then
-        GNOCCHI_DEPLOY=mod_wsgi
-
-    # Deprecated config
-    elif [ -n "$GNOCCHI_USE_MOD_WSGI" ] ; then
-        echo_summary "GNOCCHI_USE_MOD_WSGI is deprecated, use GNOCCHI_DEPLOY instead"
-        if [ "$GNOCCHI_USE_MOD_WSGI" == True ]; then
-            GNOCCHI_DEPLOY=mod_wsgi
-        fi
-    fi
-fi
-
-# Functions
-# ---------
-
-# Test if any Gnocchi services are enabled
-# is_gnocchi_enabled
-function is_gnocchi_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"gnocchi-" ]] && return 0
-    return 1
-}
-
-# Test if Ceph services are enabled
-# _is_ceph_enabled
-function _is_ceph_enabled {
-    type is_ceph_enabled_for_service >/dev/null 2>&1 && return 0
-    return 1
-}
-
-# create_gnocchi_accounts() - Set up common required gnocchi accounts
-
-# Project              User           Roles
-# -------------------------------------------------------------------------
-# $SERVICE_TENANT_NAME gnocchi        service
-# gnocchi_swift        gnocchi_swift  ResellerAdmin (if Swift is enabled)
-function create_gnocchi_accounts {
-    # Gnocchi
-    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && is_service_enabled gnocchi-api ; then
-        # At this time, the /etc/openstack/clouds.yaml is available,
-        # we could leverage that by setting OS_CLOUD
-        OLD_OS_CLOUD=$OS_CLOUD
-        export OS_CLOUD='devstack-admin'
-
-        create_service_user "gnocchi"
-
-        local gnocchi_service=$(get_or_create_service "gnocchi" \
-            "metric" "OpenStack Metric Service")
-        get_or_create_endpoint $gnocchi_service \
-            "$REGION_NAME" \
-            "$(gnocchi_service_url)" \
-            "$(gnocchi_service_url)" \
-            "$(gnocchi_service_url)"
-
-        if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
-            get_or_create_project "gnocchi_swift" default
-            local gnocchi_swift_user=$(get_or_create_user "gnocchi_swift" \
-                "$SERVICE_PASSWORD" default "gnocchi_swift@example.com")
-            get_or_add_user_project_role "ResellerAdmin" $gnocchi_swift_user "gnocchi_swift"
-        fi
-
-        export OS_CLOUD=$OLD_OS_CLOUD
-    fi
-}
-
-# return the service url for gnocchi
-function gnocchi_service_url {
-    if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
-        echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT"
-    else
-        echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST$GNOCCHI_SERVICE_PREFIX"
-    fi
-}
-
-# install redis
-# NOTE(chdent): We shouldn't rely on ceilometer being present so cannot
-# use its install_redis. There are enough packages now using redis
-# that there should probably be something in devstack itself for
-# installing it.
-function _gnocchi_install_redis {
-    if is_ubuntu; then
-        install_package redis-server
-        restart_service redis-server
-    else
-        # This will fail (correctly) where a redis package is unavailable
-        install_package redis
-        restart_service redis
-    fi
-
-    pip_install_gr redis
-}
-
-function _gnocchi_install_grafana {
-    if is_ubuntu; then
-        local file=$(mktemp /tmp/grafanapkg-XXXXX)
-        wget -O "$file" "$GRAFANA_DEB_PKG"
-        sudo dpkg -i "$file"
-        rm $file
-    elif is_fedora; then
-        sudo yum install "$GRAFANA_RPM_PKG"
-    fi
-    if [ ! "$GRAFANA_PLUGIN_VERSION" ]; then
-        sudo grafana-cli plugins install sileht-gnocchi-datasource
-    elif [ "$GRAFANA_PLUGIN_VERSION" != "git" ]; then
-        tmpfile=/tmp/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz
-        wget https://github.com/sileht/grafana-gnocchi-datasource/releases/download/${GRAFANA_PLUGIN_VERSION}/sileht-gnocchi-datasource-${GRAFANA_PLUGIN_VERSION}.tar.gz -O $tmpfile
-        sudo -u grafana tar -xzf $tmpfile -C /var/lib/grafana/plugins
-        rm -f $tmpfile
-    else
-        git_clone ${GRAFANA_PLUGINS_REPO} ${GRAFANA_PLUGINS_DIR}
-        sudo ln -sf ${GRAFANA_PLUGINS_DIR}/dist /var/lib/grafana/plugins/grafana-gnocchi-datasource
-        # NOTE(sileht): This is long and has a chance to fail, thx nodejs/npm
-        (cd /var/lib/grafana/plugins/grafana-gnocchi-datasource && npm install && ./run-tests.sh) || true
-    fi
-    sudo service grafana-server restart
-}
-
-function _cleanup_gnocchi_apache_wsgi {
-    sudo rm -f $GNOCCHI_WSGI_DIR/*.wsgi
-    sudo rm -f $(apache_site_config_for gnocchi)
-}
-
-# _config_gnocchi_apache_wsgi() - Set WSGI config files of Gnocchi
-function _config_gnocchi_apache_wsgi {
-    sudo mkdir -p $GNOCCHI_WSGI_DIR
-
-    local gnocchi_apache_conf=$(apache_site_config_for gnocchi)
-    local venv_path=""
-    local script_name=$GNOCCHI_SERVICE_PREFIX
-
-    if [[ ${USE_VENV} = True ]]; then
-        venv_path="python-path=${PROJECT_VENV["gnocchi"]}/lib/$(python_version)/site-packages"
-    fi
-
-    # copy wsgi file
-    sudo cp $GNOCCHI_DIR/gnocchi/rest/app.wsgi $GNOCCHI_WSGI_DIR/
-
-    # Only run the API on a custom PORT if it has been specifically
-    # asked for.
-    if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
-        sudo cp $GNOCCHI_DIR/devstack/apache-ported-gnocchi.template $gnocchi_apache_conf
-        sudo sed -e "
-            s|%GNOCCHI_PORT%|$GNOCCHI_SERVICE_PORT|g;
-        " -i $gnocchi_apache_conf
-    else
-        sudo cp $GNOCCHI_DIR/devstack/apache-gnocchi.template $gnocchi_apache_conf
-        sudo sed -e "
-            s|%SCRIPT_NAME%|$script_name|g;
-        " -i $gnocchi_apache_conf
-    fi
-    sudo sed -e "
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%WSGI%|$GNOCCHI_WSGI_DIR/app.wsgi|g;
-        s|%USER%|$STACK_USER|g
-        s|%APIWORKERS%|$API_WORKERS|g
-        s|%VIRTUALENV%|$venv_path|g
-    " -i $gnocchi_apache_conf
-}
-
-
-
-# cleanup_gnocchi() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_gnocchi {
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        _cleanup_gnocchi_apache_wsgi
-    fi
-}
-
-# configure_gnocchi() - Set config files, create data dirs, etc
-function configure_gnocchi {
-    [ ! -d $GNOCCHI_DATA_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_DATA_DIR
-    sudo chown $STACK_USER $GNOCCHI_DATA_DIR
-
-    # Configure logging
-    iniset $GNOCCHI_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
-    iniset $GNOCCHI_CONF metricd metric_processing_delay "$GNOCCHI_METRICD_PROCESSING_DELAY"
-
-    # Set up logging
-    if [ "$SYSLOG" != "False" ]; then
-        iniset $GNOCCHI_CONF DEFAULT use_syslog "True"
-    fi
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$GNOCCHI_DEPLOY" != "mod_wsgi" ]; then
-        setup_colorized_logging $GNOCCHI_CONF DEFAULT
-    fi
-
-    if [ -n "$GNOCCHI_COORDINATOR_URL" ]; then
-        iniset $GNOCCHI_CONF storage coordination_url "$GNOCCHI_COORDINATOR_URL"
-    fi
-
-    if is_service_enabled gnocchi-statsd ; then
-        iniset $GNOCCHI_CONF statsd resource_id $GNOCCHI_STATSD_RESOURCE_ID
-        iniset $GNOCCHI_CONF statsd project_id $GNOCCHI_STATSD_PROJECT_ID
-        iniset $GNOCCHI_CONF statsd user_id $GNOCCHI_STATSD_USER_ID
-    fi
-
-    # Configure the storage driver
-    if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
-        iniset $GNOCCHI_CONF storage driver ceph
-        iniset $GNOCCHI_CONF storage ceph_username ${GNOCCHI_CEPH_USER}
-        iniset $GNOCCHI_CONF storage ceph_secret $(awk '/key/{print $3}' ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring)
-    elif is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then
-        iniset $GNOCCHI_CONF storage driver swift
-        iniset $GNOCCHI_CONF storage swift_user gnocchi_swift
-        iniset $GNOCCHI_CONF storage swift_key $SERVICE_PASSWORD
-        iniset $GNOCCHI_CONF storage swift_project_name "gnocchi_swift"
-        iniset $GNOCCHI_CONF storage swift_auth_version 3
-        iniset $GNOCCHI_CONF storage swift_authurl $KEYSTONE_SERVICE_URI_V3
-    elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'file' ]] ; then
-        iniset $GNOCCHI_CONF storage driver file
-        iniset $GNOCCHI_CONF storage file_basepath $GNOCCHI_DATA_DIR/
-    elif [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] ; then
-        iniset $GNOCCHI_CONF storage driver redis
-        iniset $GNOCCHI_CONF storage redis_url $GNOCCHI_REDIS_URL
-    else
-        echo "ERROR: could not configure storage driver"
-        exit 1
-    fi
-
-    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
-        # Configure auth token middleware
-        configure_auth_token_middleware $GNOCCHI_CONF gnocchi $GNOCCHI_AUTH_CACHE_DIR
-        iniset $GNOCCHI_CONF api auth_mode keystone
-        if is_service_enabled gnocchi-grafana; then
-            iniset $GNOCCHI_CONF cors allowed_origin ${GRAFANA_URL}
-        fi
-    else
-        inidelete $GNOCCHI_CONF api auth_mode
-    fi
-
-    # Configure the indexer database
-    iniset $GNOCCHI_CONF indexer url `database_connection_url gnocchi`
-
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        _config_gnocchi_apache_wsgi
-    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
-        # iniset creates these files when it's called if they don't exist.
-        GNOCCHI_UWSGI_FILE=$GNOCCHI_CONF_DIR/uwsgi.ini
-
-        rm -f "$GNOCCHI_UWSGI_FILE"
-
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi http $GNOCCHI_SERVICE_HOST:$GNOCCHI_SERVICE_PORT
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi wsgi-file "/usr/local/bin/gnocchi-api"
-        # This is running standalone
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi master true
-        # Set die-on-term & exit-on-reload so that uwsgi shuts down
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi die-on-term true
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi exit-on-reload true
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi threads 32
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi processes $API_WORKERS
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi enable-threads true
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi plugins python
-        # uwsgi recommends this to prevent thundering herd on accept.
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi thunder-lock true
-        # Override the default size for headers from the 4k default.
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi buffer-size 65535
-        # Make sure the client doesn't try to re-use the connection.
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi add-header "Connection: close"
-        # Don't share rados resources and python-requests globals between processes
-        iniset "$GNOCCHI_UWSGI_FILE" uwsgi lazy-apps true
-    fi
-}
-
-# configure_keystone_for_gnocchi() - Configure Keystone needs for Gnocchi
-function configure_keystone_for_gnocchi {
-    if [ "$GNOCCHI_USE_KEYSTONE" == "True" ] ; then
-        if is_service_enabled gnocchi-grafana; then
-            # NOTE(sileht): keystone configuration has to be set before uwsgi
-            # is started
-            iniset $KEYSTONE_CONF cors allowed_origin ${GRAFANA_URL}
-        fi
-    fi
-}
-
-# configure_ceph_gnocchi() - gnocchi config needs to come after gnocchi is set up
-function configure_ceph_gnocchi {
-    # Configure gnocchi service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GNOCCHI_CEPH_POOL} ${GNOCCHI_CEPH_POOL_PG} ${GNOCCHI_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GNOCCHI_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GNOCCHI_CEPH_USER} mon "allow r" osd "allow rwx pool=${GNOCCHI_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GNOCCHI_CEPH_USER}.keyring
-}
-
-
-# init_gnocchi() - Initialize etc.
-function init_gnocchi {
-    # Create cache dir
-    sudo mkdir -p $GNOCCHI_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $GNOCCHI_AUTH_CACHE_DIR
-    rm -f $GNOCCHI_AUTH_CACHE_DIR/*
-
-    if is_service_enabled mysql postgresql; then
-        recreate_database gnocchi
-    fi
-    $GNOCCHI_BIN_DIR/gnocchi-upgrade
-}
-
-function preinstall_gnocchi {
-    if is_ubuntu; then
-        # libpq-dev is needed to build psycopg2
-        # uuid-runtime is needed to use the uuidgen command
-        install_package libpq-dev uuid-runtime
-    else
-        install_package postgresql-devel
-    fi
-    if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
-        install_package cython
-        install_package librados-dev
-    fi
-}
-
-# install_gnocchi() - Collect source and prepare
-function install_gnocchi {
-    if [[ "$GNOCCHI_STORAGE_BACKEND" = 'redis' ]] || [[ "${GNOCCHI_COORDINATOR_URL%%:*}" == "redis" ]]; then
-        _gnocchi_install_redis
-    fi
-
-    if [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
-        pip_install cradox
-    fi
-
-    if is_service_enabled gnocchi-grafana
-    then
-        _gnocchi_install_grafana
-    fi
-
-    [ "$GNOCCHI_USE_KEYSTONE" == "True" ] && EXTRA_FLAVOR=,keystone
-
-    # We don't use setup_package because we don't follow openstack/requirements
-    sudo -H pip install -e "$GNOCCHI_DIR"[test,$GNOCCHI_STORAGE_BACKEND,${DATABASE_TYPE}${EXTRA_FLAVOR}]
-
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        install_apache_wsgi
-    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
-        pip_install uwsgi
-    fi
-
-    # Create configuration directory
-    [ ! -d $GNOCCHI_CONF_DIR ] && sudo mkdir -m 755 -p $GNOCCHI_CONF_DIR
-    sudo chown $STACK_USER $GNOCCHI_CONF_DIR
-}
-
-# start_gnocchi() - Start running processes, including screen
-function start_gnocchi {
-
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        enable_apache_site gnocchi
-        restart_apache_server
-        if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
-            tail_log gnocchi /var/log/$APACHE_NAME/gnocchi.log
-            tail_log gnocchi-api /var/log/$APACHE_NAME/gnocchi-access.log
-        else
-            # NOTE(chdent): At the moment this is very noisy as it
-            # will tail the entire apache logs, not just the gnocchi
-            # parts. If you don't like this either USE_SCREEN=False
-            # or set GNOCCHI_SERVICE_PORT.
-            tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log
-            tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log
-        fi
-    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
-        run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE"
-    else
-        run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT"
-    fi
-    # only die on API if it was actually intended to be turned on
-    if is_service_enabled gnocchi-api; then
-
-        echo "Waiting for gnocchi-api to start..."
-        if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -v --max-time 5 --noproxy '*' -s $(gnocchi_service_url)/v1/resource/generic ; do sleep 1; done"; then
-
-# start_gnocchi() - Start running processes, including screen
-function start_gnocchi {
-
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        enable_apache_site gnocchi
-        restart_apache_server
-        if [[ -n $GNOCCHI_SERVICE_PORT ]]; then
-            tail_log gnocchi /var/log/$APACHE_NAME/gnocchi.log
-            tail_log gnocchi-api /var/log/$APACHE_NAME/gnocchi-access.log
-        else
-            # NOTE(chdent): At the moment this is very noisy as it
-            # will tail the entire apache logs, not just the gnocchi
-            # parts. If you don't like this either USE_SCREEN=False
-            # or set GNOCCHI_SERVICE_PORT.
-            tail_log gnocchi /var/log/$APACHE_NAME/error[_\.]log
-            tail_log gnocchi-api /var/log/$APACHE_NAME/access[_\.]log
-        fi
-    elif [ "$GNOCCHI_DEPLOY" == "uwsgi" ]; then
-        run_process gnocchi-api "$GNOCCHI_BIN_DIR/uwsgi $GNOCCHI_UWSGI_FILE"
-    else
-        run_process gnocchi-api "$GNOCCHI_BIN_DIR/gnocchi-api --port $GNOCCHI_SERVICE_PORT"
-    fi
-    # only die on API if it was actually intended to be turned on
-    if is_service_enabled gnocchi-api; then
-
-        echo "Waiting for gnocchi-api to start..."
-        if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -v --max-time 5 --noproxy '*' -s $(gnocchi_service_url)/v1/resource/generic ; do sleep 1; done"; then
-            die $LINENO "gnocchi-api did not start"
-        fi
-    fi
-
-    # run metricd last so we are properly waiting for swift and friends
-    run_process gnocchi-metricd "$GNOCCHI_BIN_DIR/gnocchi-metricd -d --config-file $GNOCCHI_CONF"
-    run_process gnocchi-statsd "$GNOCCHI_BIN_DIR/gnocchi-statsd -d --config-file $GNOCCHI_CONF"
-}
-
-# stop_gnocchi() - Stop running processes
-function stop_gnocchi {
-    if [ "$GNOCCHI_DEPLOY" == "mod_wsgi" ]; then
-        disable_apache_site gnocchi
-        restart_apache_server
-    fi
-    # Kill the gnocchi screen windows
-    for serv in gnocchi-api gnocchi-metricd gnocchi-statsd; do
-        stop_process $serv
-    done
-}
-
-if is_service_enabled gnocchi-api; then
-    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        echo_summary "Configuring system services for Gnocchi"
-        preinstall_gnocchi
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Gnocchi"
-        stack_install_service gnocchi
-        configure_keystone_for_gnocchi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Gnocchi"
-        if _is_ceph_enabled && [[ "$GNOCCHI_STORAGE_BACKEND" = 'ceph' ]] ; then
-            echo_summary "Configuring Gnocchi for Ceph"
-            configure_ceph_gnocchi
-        fi
-        configure_gnocchi
-        create_gnocchi_accounts
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Gnocchi"
-        init_gnocchi
-        start_gnocchi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        echo_summary "Stopping Gnocchi"
-        stop_gnocchi
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_gnocchi
-    fi
-fi
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
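[Editor's sketch] The readiness loop in start_gnocchi polls the generic-resource endpoint, and the same probe can be run by hand against a finished stack. The URL below assumes the default host, port and prefix from devstack/settings; gnocchi_service_url itself is a helper defined elsewhere in this plugin, so this is only a hand-expanded approximation of it.

    # sketch: manual version of the readiness probe above
    curl --max-time 5 --noproxy '*' -s http://$GNOCCHI_SERVICE_HOST:8041/metric/v1/resource/generic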
diff --git a/devstack/settings b/devstack/settings
deleted file mode 100644
index 2ac7d52ab..000000000
--- a/devstack/settings
+++ /dev/null
@@ -1,65 +0,0 @@
-enable_service gnocchi-api
-enable_service gnocchi-metricd
-enable_service gnocchi-statsd
-
-# Set up default directories
-GNOCCHI_DIR=$DEST/gnocchi
-GNOCCHI_CONF_DIR=/etc/gnocchi
-GNOCCHI_CONF=$GNOCCHI_CONF_DIR/gnocchi.conf
-GNOCCHI_LOG_DIR=/var/log/gnocchi
-GNOCCHI_AUTH_CACHE_DIR=${GNOCCHI_AUTH_CACHE_DIR:-/var/cache/gnocchi}
-GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi}
-GNOCCHI_DATA_DIR=${GNOCCHI_DATA_DIR:-${DATA_DIR}/gnocchi}
-GNOCCHI_COORDINATOR_URL=${GNOCCHI_COORDINATOR_URL:-redis://localhost:6379}
-GNOCCHI_METRICD_PROCESSING_DELAY=${GNOCCHI_METRICD_PROCESSING_DELAY:-5}
-
-# GNOCCHI_DEPLOY defines how Gnocchi is deployed, allowed values:
-# - mod_wsgi : Run Gnocchi under Apache HTTPd mod_wsgi
-# - simple   : Run gnocchi-api
-# - uwsgi    : Run Gnocchi under uwsgi
-# -          : Fallback to GNOCCHI_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES
-GNOCCHI_DEPLOY=${GNOCCHI_DEPLOY}
-
-# Toggle for deploying Gnocchi with/without Keystone
-GNOCCHI_USE_KEYSTONE=$(trueorfalse True GNOCCHI_USE_KEYSTONE)
-
-# Support potential entry-points console scripts and venvs
-if [[ ${USE_VENV} = True ]]; then
-    PROJECT_VENV["gnocchi"]=${GNOCCHI_DIR}.venv
-    GNOCCHI_BIN_DIR=${PROJECT_VENV["gnocchi"]}/bin
-else
-    GNOCCHI_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-
-# Gnocchi connection info.
-GNOCCHI_SERVICE_PROTOCOL=http
-# NOTE(chdent): If you are not using mod wsgi you need to set port!
-GNOCCHI_SERVICE_PORT=${GNOCCHI_SERVICE_PORT:-8041}
-GNOCCHI_SERVICE_PREFIX=${GNOCCHI_SERVICE_PREFIX:-'/metric'}
-GNOCCHI_SERVICE_HOST=${GNOCCHI_SERVICE_HOST:-${SERVICE_HOST}}
-
-# Gnocchi statsd info
-GNOCCHI_STATSD_RESOURCE_ID=${GNOCCHI_STATSD_RESOURCE_ID:-$(uuidgen)}
-GNOCCHI_STATSD_USER_ID=${GNOCCHI_STATSD_USER_ID:-$(uuidgen)}
-GNOCCHI_STATSD_PROJECT_ID=${GNOCCHI_STATSD_PROJECT_ID:-$(uuidgen)}
-
-# Ceph gnocchi info
-GNOCCHI_CEPH_USER=${GNOCCHI_CEPH_USER:-gnocchi}
-GNOCCHI_CEPH_POOL=${GNOCCHI_CEPH_POOL:-gnocchi}
-GNOCCHI_CEPH_POOL_PG=${GNOCCHI_CEPH_POOL_PG:-8}
-GNOCCHI_CEPH_POOL_PGP=${GNOCCHI_CEPH_POOL_PGP:-8}
-
-# Redis gnocchi info
-GNOCCHI_REDIS_URL=${GNOCCHI_REDIS_URL:-redis://localhost:6379}
-
-# Gnocchi backend
-GNOCCHI_STORAGE_BACKEND=${GNOCCHI_STORAGE_BACKEND:-redis}
-
-# Grafana settings
-GRAFANA_RPM_PKG=${GRAFANA_RPM_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.4-1464167696.x86_64.rpm}
-GRAFANA_DEB_PKG=${GRAFANA_DEB_PKG:-https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.4-1464167696_amd64.deb}
-GRAFANA_PLUGIN_VERSION=${GRAFANA_PLUGIN_VERSION}
-GRAFANA_PLUGINS_DIR=${GRAFANA_PLUGINS_DIR:-$DEST/grafana-gnocchi-datasource}
-GRAFANA_PLUGINS_REPO=${GRAFANA_PLUGINS_REPO:-http://github.com/gnocchixyz/grafana-gnocchi-datasource.git}
-GRAFANA_URL=${GRAFANA_URL:-http://$HOST_IP:3000}
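[Editor's sketch] Because every knob in this settings file uses the ${VAR:-default} pattern, a deployment could override it from DevStack's local.conf before the plugin is sourced. A minimal sketch, assuming the standard enable_plugin mechanism and the gnocchixyz GitHub mirror as the plugin URL; the file backend is one of the storage drivers shipped in this tree:

    [[local|localrc]]
    # sketch: enable the plugin and override two settings defaults
    enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi
    GNOCCHI_STORAGE_BACKEND=file
    GNOCCHI_DEPLOY=uwsgi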
diff --git a/doc/source/_static/gnocchi-icon-source.png b/doc/source/_static/gnocchi-icon-source.png
deleted file mode 100644
index d6108c4182d54422ec09aa935f2fd0594aa2d68f..0000000000000000000000000000000000000000
Binary files a/doc/source/_static/gnocchi-icon-source.png and /dev/null differ
diff --git a/doc/source/_static/gnocchi-icon.ico b/doc/source/_static/gnocchi-icon.ico
deleted file mode 100644
index 783bde939396df274f409f34cfbae74b43b0aa68..0000000000000000000000000000000000000000
Binary files a/doc/source/_static/gnocchi-icon.ico and /dev/null differ
zf6O}p88wVpiQ)D90b9a(4QP6xI8>i_7PE0!_Xgev$sILFxuDHtqUDj&i*TG|M;Lr@ zSIs_gNU2k#QT>$ugSyxS`Pa4!Ml9&EBJhjgSI5u(1*rc@fRf?vlVY!Wlr6BMCi?F^>!sg z7s$9G5forwykml`9UXsCW7%kc=Znmof@ds1ldI40e<+8C0`DVD->sI_Z z{t3a5cC?lL5g%?ju2~Qcm8AlIGsP@bo0GOOhZEhno{kjr#Mkh$0D{f zvYD2}Zib{R{9A{Z(5?MCj{Q!~6lKGUSKlYBje2g3Jo#OF1hf&k{rQh2VUQCds|hwgrj`9( z%(;vNh;gLRo%-|-to;p!+IW&b>vqeO`?#H(C;ACPND!Rj$Zqw3;F5N zBrOEk-M_NOunW|Oa&|&$d4>nn*R0aIhy_qdIAhQ()5{E03sR?$?*F3Nuc5wPAAJvU zcoA}8SLq@nSA7zpBNWIe{5I%=p7UfV+ez*Ak4lRD99R!YG#43xsvhmnY@nweS^-yl zc^V=cf21b*D3~X3V=A=&q5AFFl_>9G zx#PY=@VTTssyv;4zk2tf;?1_*H8k~NR=vdnuX+}8MYH*<=B=f}w=ALT9W@M!lOa~| zhiZ??Jb?G?0=1)7TE-1_&Tv>J-Fj8>WxHA$1eTc!Yjj@9Vx674?x!Xfi#Ax>wjpcO*p>2AsHi<3wd@qEW+-toEZ* zHd6Jq6%uSL4_P9L4|3>NyrgDsyklahuuU&E+_XYH5cnx%U-kJ>&I97s8YZ36%^VuX zg-mwth8m9A$}5fcBxrXXh4bqT2NJ5tLrP?owDE1wVL*+w{->gV{#7pwUq$8^1xf1N za~8_3r<(#{-?PzmjRh4lBHJNwg&m-fT@fi#In+sw1&Xw)(ip0dpYKt6Mf?^^dTrJ1 zlg8&a9pudG>5TnGB|!|hXotUjyYdEnKr&>8d*AwhpLz+C$2#*)31x~%IHaZsQWcs3 z8?`T0{dv;EyFZFon;F+(EE6u`rX zjSUZSt%mINpRa-?Xg?2spv|~#F`qUU3WeAJb8zDPMj87rUG#D6?(-;ErLP5ClOLU? z&0(0p+EQo?|ohQ6{R$TCMI;C znqo@UpB_*KO)pv=1j8l)ul1{79wTq~S9>slOJ?UtvHNG$b*1qipCChbgdi=C9o2gH z&}?Un$#*I{8<&3f+z1H?$&Vp>7-6U&)w*xP86ul~ZAa((# zf1v>~^^U!$Iv~K7|3KLu5huVVv+eB|X}OZeHm)!Cn^hGQytc{jk}gWH6fqT_k_F03 zh~-a9JFbglSk{s!>2G-UOHVu4)%)AaE9FskYoW`mcUd}uIMXQN`a%c>Em1EXNHa&| z9eAVv5sjR27WNSvO8CVMh_$p|mDUh9bFlz}*N_a@Qz;$h{2R<1zibTO@4gbkxKp43 z27(Or&SorU58mvQ-j^)zwvC}p-d;-~6(vRa6|v%WHZg;6)tSECK%^v*LWKul41QJ>s#eC!2oRQAhFcT0BS z?FPA9bSq=(ZI9~DHo^C}%|YluXo9Fw13A<4fg2HK2lbYri#;PG(LoIKAAJ6yUd1M4 zEMRt9L?-QnG?hpc&7btnht`i`vc69jMt4M>sm!;ZE=$~xb!+#{!71@l*m$Lf{z%#$ ze9G46?U`4E?>jOui;W#S8 zD}WbnWCUR)Qd6X&s=Be@Y3bi;S=jN+an5%NKXKRRSx#MDo##_;AViTJ4@UDv7cZ^I zh2O}l(C#{)%-<%4?;T=1$w1$cu=aJeVOgnqZc)w~QrGF87kDh9m@Py9#P8}ZFI|%p zoa%7&(D`Yb8`@uXoI!w*(6|O9ggiCH#KhzW1;XnOrN%4@J|)a2{xb|r(9~D${Z9Fv zP7lv`S2@zQo%|yH zQ^l681$pw=Zq`krwGQU)zCGE`6ScUzZ?>$3mdc%m$IF01$H{H{4JrWO8w#MH8R-NF zlYjx5mf|+N?hNmE!Sz?gK7)>D6J14EeGWdEFRB*DEt~LoJ72RC@P13Nc=2pok?`Wx z-g>>Iqa6WrBromO{(R(ebgZ$I$2Z*ke75JNxUFbdk$HNFHlTD`E?8~3olZ2p#ja9~v@7zdv%l)C3B_{TD{|(1g6oJ)Q81$Fpc1JHMI6?i}H)=5FK=tB@ARK~rRWTt3&QK)<9x-AT{QQW) zu&%|wyKmm6zqM_$;xyB!(LNAPTDyAd3>xMrJbuV+&S!hNW8*FX^LBTh3o|oEr)&~; zdcUO1lMU?CFWK9R)gSV+Q=A6;gNtP)N*-zNIoY3?j9srC<P zbt6X{(E$~!Gk1KEUzBv*{W7UkVwX??7y^D?q{f%bF0*%9xms;2lfX+e=kM_8*O1LK zwe{YsINLVS){F1QBzfH)vi|9q=wHL{?4PgfTUg-Zp5;wN`v|(Vtxw#2Q+?;t&INVd zTZi0x2ucze&k`X)H+m_s078TE`DcDs^KFXbroRIrkB+aS&Y`3%VNT7JJf!iLiy2AN zgBJFm+1E8oy2Tc!%ILHzw3LcvXLN$Bf77*%bY2j8xWCvD{Pb0vNrB;yyOh0#=msOk z;cBidm)(OyQST9hw@UA+SS!+N^OB}+-?OlF8ESv3|H0A!Ewr*={V^ZK4eBUD- zM6l}hbbVJA#xN+WK6`&Z^6Ve0r<7z7W3yp3E+6uHFS0LM(UUGO01RE!&&{RHtJa__ zWg~P*Ev%8e6|PhA1kzL~HA~&D}MdZNFTM%v0VFESm(o*!T63)*op|eUr4m2duF^joaSc>XiCy zBp~4K>N4`Aq2b{u;ZW_N?cSceVpECc({%TPe2a4;^rQ-VgGp*TZB=TOZWn(}tp^M? 
zx+{8m<=GP9gc#Y2m;YMudJ7!I%N5X)ROQj~rGfE!IhVTpH*|Gnb8Xp^f3|m57$XE* z_1DL6_`yO+y$Fzgr>3VV2rrGJzP%a-7+qpe?}99<2|ZhXlaxdWn~Bm^5%I(=T-9Gd zF@aiYkpH1V*xSxL+tWkBoI_93@>ICqoRnsR52XY?O37u3VX%2tHK&~zn6 z8l`-xDG_L7BnBLb5rsR;F_YvUyeI$<_yzIm6QUM!VzewhpcLKrz1hKTn0UUP>&3r16 zi2f-w&I-6&TG6Y(&eCK=v-M-pS|f_@M!$VZRqy#Kp3)1Vy*5T~Jn|NOj@U1Sdsm~B z!1SuBaPU#sX9!W7&IMo@d)brUKsf+6KGg&@t%QSz*Qh40@4$q&V#WKQpYu|N`cRBj z{&NpovMgV&pI>Nnt^6Chhm85_5!aRrm};rjt#>l(FHd9L3%!>nW@g@#mg$2ibyGAo zRf|Fyk%D}mA-tgfdOg{o5sKTE_@ew^tZ27z)DyR5%w^YIe}mPVX%+ORau+Gmx05EF z^i-=83sfqGtr)_S`+9AjHxSskN}BU~FEs8AX$Z7Fl-6wK(n=UU+$XF?o7xts5<#Mn ztnA`Du#ebpdi#4{pX|1@pb)=-qdZt1%TpY;Y!xfZM+oEn8*IJUu$L-S_rBYd z)Xbza35v;7zyEUINSo=|FSiAhV;y0LaMEsyMv)N(0%+(zK#gE83JXshGem|{t%w(d zP!(Zi_Aegjk490Y6k8pS>XV~4nOg?ZPhZsi$WDe!C#|gs74&b;tY#p|@fLa=BNs!` zGIWABJQZIOC0870L!|fcW^prjUw8JF>(+jhT9xgY*Xy}^UOx8NbxEgpa(z9&e;ZKQ z5STX}u@r)Uf|JE~1nCdVu%EezVm(EcXvTfU(mU1<^%Yc#=<534-mSF%@&e9zO~z7X zvi#@!^VuVm=L75?UnFPMKK@pmrHP=)9}Lxr(LLAioHNG)+7Azwz9sz9XQyw$6Zap!s`adQa*F(qMmJ&odITFf%b7{jV4xuRT_8QQ1}xNIg5-UO9ykJX+SaZY-kl z^+x&?Yb(0Mbu4RTJlq(6HW}93KV5n%IH*lv?|%jf5+ULyMim?!Ft{f7K|i5xAyBVn zb8ezhnXu49uk~G@s&c(HvE@RNWzS;G#xMOMqwo8qPY_(Py1Zq&Jo9qDOchcy_!|0< zak{1?6!wDn5uRsbP7!sGX6Hh4+Z23lH2kd5+0FHqp>0nX>wBDqd&4*?D$B);yG|ZEB`}0Q07BPD&V$CPWL|lqvJce01MDHHa_)cVNWosx`>%!ol z=H?)Rr;jY}ZHZ3w%UI*AO#YLDB2JPPRaN$jfi^!@fjxgMe6`C{!tE!~u|or!!fALN z>}_0y-Kd!BY_;t*=y>L!Wr(f~jp?`dOog%z2OdhUHl zI*^93*_}jX4T(@;InoHy>))v5JAvOI3p2f!zV)>}m_bkm3N+4sRz|BKP@tuzcUSO` z0;(oVhCG~}F%agghG>qQAf=J^9jKe~9&+EtAtn1W!&BCE_w9BR>d7ijWJ>)9`#{<= zbuaYE@+Jk~+Z3!S@y*I4@&d3K5RV{O4{BDyoVC>D7sgTs=cUADWUQes0Ck3HD)(-j zL?M&1+&fwRKd}1**iTY$jUMF0fu4j_IXs)NdIu`0FfUJ^%*x`wofAtxkd z)mHE`F@Oq@#f6y@!syG8-ftIO;Lv=30~q#fVm>HinX*%(-2{@T;Kbfic3tYR%z`W? z(O##z7kIiNpPH z7-|lnP`1QwAX9`Yc+LLE;X#X0EBq1-i`_78PO!S%x#1Y0DhVNO$E{TZHLTE<; zVx6*9`6_z(mRY~h(Fm4jxXG?VFbFE*5afB!pcuPWz1C2}gf-+cWo+;p$6j|Ik876F zM-^zlx)p!&zj8=yeA4Ff=jdf}v%^v-nrrOle*6!V51l52smqGR)|$}Ry-jlFuQb$+(+f`R(PB(dL1sC!X5-<+NMge9L8g!>WZhs+E9qLWv`2L8;dg{b_#~Bem^F zTq=HwRgH*iAVF6Vvr?A5b9dIAiyCFBFv&FUih2n zaM|G9c3$rEViAq<*8kr}TNoRk4haimhDEDAR6Xsp+(k_fvr4_yIUL!8 zT`%QU_kxMoR3VQy4mY@5(rXuGKOps?CjuD&s~K7%vAIUmfkf#iW0-}Ao&cNc}M1D z!E368zQDx$p>9yVg)Nfu=#QmFp!fpnTdlsRZOJjWL5Z7#;4}1p=Ndci`AFF{TJ#UP zY@fRD%kppw;2x|GoLArXfoC74Du;XiF*lIsE8nV5@O6Ct2VGzE;H)0o0qsa(IfaZ-kiO~y<`lsQwf%^ih8C1jhD$SexmEK`GdEHa%ElBvwo zPBJGV^Az?r6|#-nu)XWqo$vepuIu;5xvox^JkN9A>t6R->+@NkrTO}MrI;j71v`86 z$Y1AByG{g~3YC8AwZ^B!?ge6ckQ94F2>HRC||lYa7P{ z00R732F#a-e+`|;s6S0?1`Ul!G$#YEF|-_FtxQ^;fg9;?g9Au`f~I%!89|p_ukn(W9Cu`M zVT_P(oLbqG-xpO?fgaU6Fz5grGRtTm2&mb?8>&wD{Eo?VGyjoqf~TB&550046va%C zFQdQM{7EZ+E>AT*+(a4DUZajy*o3kG>wj)-Zh2FgnLcs$pVu=b2Qoh*$R6z>X@o#w z$^EtkY1yTOLVx7vDJj}n^V-#t8CZkWBsoNayv%8J_YqZ zRkJQ-ci%jiqP6&R^b7cVif+t$s!{pXA~%fmpD|8SVGTi0)uPhqCQXz!<9-|Z25-|Zuz%?pgFd^ zO0gF16WXpK`R|6;{@x~IkPpa{9eQ!%-Y>G1I_r zBBORE0mo3yfkxE-0xjtJEeL7fA9^u-mmJQ<31#>q!T=O=3Xlz4{3oX51sQpW;V(J< ze2N_Ne1fal^ZSn<3H-%^paPP>4?i742fk-W7JHu>*YW2MMBhjNLeZM*6V?u2dg0F1 zMJWXZJ6gv!82$ObPwjSJPtjLUH8iVAnOOGo_aCP+{)z5p1 zzd0p{LwxRIF$E0a4f4otcVye(CM|??3aWGEAp9{zuFg@oKG(-2lhY0Kd1d9ovO{1` z=?F47+@$P+^tghsKv^H+H*fjSCVu4ZQrWNl`zzP`PC)C&B2w|lvMil`{0IRV;HR_m zYCC`d5(TSs9w~W&@lYt;DuTZG%)P(I=OK9;eo2B?uFm~_Kg7Gv-yz>Hf1VY~MA2=p zlSRJT&%fIMVhtm3cBmVYV7lPnmCp0Nm3xx!x%fV>jVcMDlpP3s#2(0fpD_%ERH7&U zR2IB{AA-l@(IZn+Qw2_TcJt*HXfv~G@~}geb?tKg%a=Z+Nq!hpgx2m%Ni=JQNRD0x z6H-lqJphKNgO>Ysx8ipO;#|~gFPjOUm{>z`T?I^~vsmm6-TitG;ahRRlZjN56Bw&jL+7Egy5pS@WrQU3}H8j88nMKCcbz=fK?YV;M*au8vqdrducg(>2V zu##>L-fxdKGu>i5)v}P^amRAmKR6I{Sehx;XZU~mP}k;Xcjt~PUaEba=@php9q;N@ 
z6>IMKvD8*9Xq=Nhn$nFh-^i?TRpZ!o$DxsT>y&HjS*t#Nx-OAm`3J18)V6=n$Eyu;& zX`i1J!q|ze)#(nu8LO0T9Wx%bHp@?}IKFEWU%X{0wg<`N@Qu%VwRDU{*fJY7=Y3sE)SU>pp9{sXd^bV(5H zR>@+8pCo#g^(9KOGgs5~#Y+7`S1r>94Y>F(mJ5~s^b|_83f}tE8MCE)XGJ%K=v0s0 zhlnw3Hr=PN#-W*-KW;Ik=XIp|?BjW~CxprxfecTb`uxQUnkcHg_OYR=Uc1nj<+x@G z5!`5!2EOgv7&DBOTVal4##%epu#L2bwhu8mAyGtnoytu*JzvZapIf2186&$oS|FZA%ICmRVj#+MNg81X5 zi8+VOZFkIZE;m~n8`Mh*Z)-A{XsK>?LY*B&+SarrP=BIk#0cwLKu z0~FmpBaqQ9YxeU%-hBs@?idd@8X}??6mC1pTX!eF`|Dd=8=l!@=C~9zcKErGPS$la zz3BMiXF?hB?*uj4U)b=WoH(MP$lbI0Vz7%yqn@t*U#`_;Z> zDof#nM8~1`x7>30*YCg1Bw|Se`@_Zjr>%~#HXYada|AK=%v%^mED>rn$#JO;&HX&& zd%e5~iBAGiDbNnD&YOG7h=G6EIjos$KqQMa|F^)Ls>Jfnh&gO{l(e?GEfhSTL{bhS zENMTUk~#^6=2^pDr=JaY$v>gll9C_T&qbT+Rct-jE(=dmC)OuEbRa1N5v+`je@E}| ze>Dya@QBOY-d-$e;gM!<$+cdl7b^L}59`QiUh%f9KsiEthx`^+0743GP%iX_MG)IwETmXsAdBcG29gEM?3z za$=-C9MENWsn*0ii)E=0A%#L8QFc2Kp zEo7e$Y9>0rsBj-M%A#{7SDM~ne9IhloqFI`FBaLc3EdPoU5efT6|Kbv(Of_}H3B0l zeTg6Vj~g%`9!0gcJX+kW*6YFN!C_mZ#yS`a6lFpta>Bm?m|_zkZ^`192|x;$Nn(v4 zV@wybGY?;pJ?}&wdU2}J;m!(SPbA||^FY~iW>o1LvsYhVV85$u-h&jE%0!=xqae36 znNVkj?1jZXd`Q9J2!j6P*K7k-rl4u}P&dU5m_p z`7{FuaBZ^(a3e7xbptfbG(4u}^x;DME9jsVo0_dXc!Mn;mqX3`jfJL*8D&5BHYYX% zSrgv6wMGpZK$*w0RQ8o)io%N2(89Fm?$!e;_~@TW?M(ZlDrZLMrX8Z$&uxaKrFjwB zgc@O_uFN>%W~J7DHMsVmf7a`xwT4qSFKAg-SkYsu3lic--t1?I%lPX_Nh%xbsfaGt zU=)pYeo1RnwauPrII}&0&);*mfapLY&)}%GXfC^GMF5xgGKAxFKEGs3)3W`AFRYlOq(tP__Wx;1`d z1ucflV0df6CT!t;xp7Ow!D%Y-JC_6FH1ovP`#Zb!Wnn8lq}0W!1Nh@#67%;S1t4O7 zYGT4p_!vgrC6L^LZ7qGRkNFVnd8(?)#uvIci(|?Y8+8#xoa{#+QsbH>Y1KT!<`#-_ zd^V%`ar9Ki2=6;y9@?_#I2Pir<*=^#rII_l<)5~KI~*uiVw>R1d>A_%F(Dzzl=_nB z6p$8jbTjtmRUko-2{II_9toZ+wn7m_eh?r<&SnHC{}&*|(C2hyZZrrC$MFq68jO$CHX|T z`^o`)*{O;|m*c1OlnaF{91L{)JUszq5mNippS03G7g^T5)hOSuj$;+URJbwW=M5pgQt=@VFe{Bf(ONwiEcPC*Ysrfj?U7@9|6JEIFD(Tp{BO zZg?RpRor3`8*gZ1YYR`a!OB@TB889RFOK=b?|kBM@*LJ{WhxstlX9dyT3?|C+|XSI zq{`BtrhrueID+9{W|@VuHwC^`0m~NBmy|113{pyMz34LzLOP>JGRIodO zEr6!>$d=}ntp~d)Zy&T4ttRiRz7b`tYC5s$Ku%27pjo`c4)L{o!;9aG17tpL^ca3~ zhS>N%H5m7@b zb^yz9Lx2h7>w1Bvb7i=FljBlGuJ}Z2i@b?MX!De6Za}U44;$tX`wx9Zi&|LL05J!X z#%U@o{o0|6%97sFsPuy^9gX=84J#j-AIy0g6(((O)YB!hjgWJ7xaeCT;-%AoZxXIF z3q`V`Ej2gyHue!Axx|{ULNzBO*tU09ySN~;h{2F?-MM48o&Q^2rL2wAR41I42xiY) zQ0CpxVcWNpE0ZIsE$GJS?BH8a!gFOSTwC@#W-VG?Qc^;;P$ae>*HHDH8z-dhL@Z_( zyz;mJ$x|B4`c<8Q3%-FsDygoFiF6StkA-y`)JzX+o!tnB%RH+D74y_Z!ijex;G^Ge zmH_f`y&Eg+JXnj>3&mBiixqjMLx;&VE%5)s=7)w#{(3FpLE+FmtU+zra4I?gyXw98 zZNqjN=r5|o&H7EHVEBRaV$|*XH_(^~*vl(N$IWQ9T>3TZO_qlIq8F!1?)f4yisXmN ze&zJhr1-9~XsR072YLH?pJ0FqL6vQAaL||?Bx`7ax~Bo@CRFwc%q|a|iTF@gn%FD8 z5|@Rfj)>2B=>a%o1|lBBdmjpjeNDt~yJNTQPU}6MOic2`3|RdL;2 z2NVC{Lhi)dyuF1|)gSA@kX9X^^_nQ>&rB0EHqMe7f~e9=5)VW&4uT zBf%d@Y$F6uSyq}JIl{1G^l)n`C80}@k^phly)`H-`f3E8oU4Wn!shKFccm)Z=>OjU zb3M3&c3xgDH)2T8)79lX=(Jk5>?h&G2H(;8ZqR_#Kfx>WM7gB;p^}V!-{O9Q{n;P0 zG#ZPqkrQs|%{9`>)h8ZucA1qAD?t~tTokHr=j9qym`Zd41Yu);?^SB*swLYftX(yw zlCQL<*#-Jupo>wd6jf!i9u7TFnsB+Tgqm4y64w?VMxi8N zpt}ai6oN>pQkJcnGz8pxwFugI*)K!OIeB@RC2IdX{<~WXMa$A5%HQwH$T22?Q`wuY z%Yyh(<|@xf+uh3An{vDVYmUMF4QZl@&s`6NzB9xKAgIdC%I{)Augp)LcH@q@XlFq zx=Y_5Mvj^tIu}(Xil;%Ual_^?$A<2&)c~iY60jr+zIniA6n=R+WnF+kq&%QxZavxRRGBxdbZQP^d_@`q%`bnHH>z z%>K5aMO;{d0UUwjdNoC7cco7|6|oCvK;le0d`_+oXuz-My{)(y~{+XZton z2t8C;AO5(qUnz$hzyzJZWp@R|7^9MKu$8y|2(_lm8{Xt)^btOq?Vc|NB}+vfrl%OI zy3t4qqKbrU+&bV6gD?p$Efe+V`97S$1hX8n7bf~Q; zy09#Mz%xKy6?BNoBbfyXrc3-#jIjZeHt>N$(fsa#{_+p7V-FG3pr#(YnGqJzao&`@XVqRTWfjN> z5CE9hC@aCYK9vuX*K;L!3cq92^V17xHFhy5fAfpR*QjPF{y9d4UzLC6iCttC0?fdR z>{I5Nb|i(iTyHWYisXX93P-uokiOB~b;6PbNV$)Aeg$Ndg5tIZ2#p--_?XEdpmzW# zb;Vrw0NyU9wj_8=N2}4x&gCfIzG;s-sk_I7iZ~a!2K*A90LDO;@g0|@+6hiKJ8u$k 
zmJ!79pf4*yW#!8;1b5PBNuy2Nk1Q#lKw{zp-BNnWL~CSb4mr75+L^LgqPP9FEd4}W zM6h!jpSTmpM`Tl>k27L++*!6`5OA8_NpAcU++nxl#d%6^xV2-m;LRdV%9`oZqLl{5 z@tg$ou*x*~4GbeK2rdc@NSb+Jn1>w5bU~V_IpPl`D!GFf&zRRQ<>jQ4FW!&3d+UCW z@n)_fSN#2C7(`Jg+jW=))ChhfM0)ktZl`~2`X9BY*%vfe-kHUD$o<&ZmrGzW)UYdd zZlyr$|4;>r{}qutns7a!0mX{qN8HV&Dvf~Uu88tIDC3m61ni7>9EQ(-dB%90gNwS& z!KZx9_AWifuPY}jJ*3$|gEws9PoKWxpNV*VVi|5ANqNVw?425wVZJ*FKQPfY5K(m0?>h02!y>6`Z$*!b@PeM8hu_< z+DO7o!;0qXBl}%HDa$QqD)>z+O@DLHO802L01oL|5;^C#n!A#_3+cFu4qRz2J9X>* z=rGlmzeX=)TrUk~MHg)p$AV#qPfk|x9N;4wqFXE8f%c%K4yo{`7(SNobU|q%(tPL+ zBB0PNAmaIFne=j{d;*j$xi35nMaKKYJbH{ryI+p+#n%grnGOAvU#boD&0i?H#Z3E& zwRRJxP%qNh*0Gs;HLw7$o*9W2kW7x$-=dlj5M=R_TTZI10Q_{xL17O7`F{t$oBx;+scwt8_yM8 z5f9w{D4X8KK>&sLBM*PK2r0v%)%o*m$8&;8j5u1c@l- zG}4$7_Y;qJHpAKZ43WB8w=6VuLZm~E-NrWCv7-20ap^h2ck653>pox_!2S8oBEl6QZV%8yMHD|RK_ zF?(UWJ6H6K7;gXQyUo2-6M9UK_IU2-+--WIVV?u^25`O9OZv0v&u>tl9%kDAd?#mr zO>h4Pw9mnQr2TBl}g4{YwqIh~4*#I4s2#IX{xN5{kf&E!h0G|&#lkLu>Wiu|(oL#i9Z zd*`{0qL6h}83Uj6^@XAkbw}0+g0#W(srYety!CbHHQ(Ofu)N^zZ{fVGkykJDBYpB=47ysompyGl)ImYE0bqSGN!C*XD0qTP~7iUIe2q(OHv)Q##` zxM=HPSmn?IIE~iBKegjsysIZPjNyNzQp?+RZ!YZ-9^{;D@JfB%#4FIOtjJwxA zkh-&O}CM3wYN@Gp*cN6c84=Dvs!{Z#hiIA%6Y5{SbOX}4WZ(cQo10%q{Ra`*c! zjn)|N0#45LYBdUhC?2_BAF?#At6s@(q(aRHr#||NDux`G#^{YPC1UU5(vFR^Pr#`Q z=98X^dR=);MK0Mv5Wd}eapkh?UE6DZw|r0(=wq7!7S#{i_$u=&dAIwFEXq_kFAB&d zwyd)+**H3i*6tEIkV`m5!;JAiTb!|N9#?=64pwgF6$*2{cJ-U0xDqVH+SkD>d-mZ8 z$tMwc1Enog#g91MhI)S03w&ko&<*_d0^x})%>$gt@V`U^ceRRUzY}|Y<694GLnIh> za?SJ3gCJIJQ(^_ncY|!q>XJG4dHG13~O^w-2JQ7-?5Zs(fLcRpGs&xO^(@ zX*q5ciE|w&7LmGSp25qo?%<=o*Q=Bz6N43j7A@R6M)V`H#dRF)$l3$(>e9rIVhG`Z zg7`gOpQxxgal0hX6s>TZO+sxOnZ#TR6*s$XxQ;WAg>A@#$dPPtYwn-{Fx(gq3|50W z8$y`}2$H?P+nyAdp7AfbE-#@yPeED*_$l*FrvsOV{X(w8pa%piHj6 z_eV7sWXJCRv=9pmk`N8;QFHLS4k_#5q>oN8^w|6XcT^<4n4A1jLqqEx-@&lTiIH0uhw>m-T6^|#g>|qBYVGp!wk*G|)Ovpz zaltMM@WL)d%sJ_Y{a@BZ7m6XxbK%EX4ZeX8By>vS9F~Z9K?8Be45I zSkv5W;H#OgNn7}UNgE9?_O*8$!dlGiTC=&$Azc3hX}jJ{@!#Eg{)A&1mY&N4@$2$D zJ3dIn52ngT)y}cSW4c)shCP zmc4gliqTRAJkk3I;tJ(8RpxaRVo?^_&Sy}^(>9yATtbkjCQ|~r7LYJ}61$tcAGdV> zLp)Hc2A?DmeDega;ep=7$7c(cthf2TEH=Q)vLJHT8~;s?@ODt!znGI+lNFYM`6A_$ zGzA!H$=}WTAi$kLx_QQ}0B`*dYUv0_t9_wykot#3Zwa%rSkVZFk}>$Il7xjn@m|Zn zl)wsd)2wH9!ke*-9MH{Bc4ApetPEBOYo8WmoxZ32vEKazL#dw zX`U4Y1v$B-`Q=S8e(i$$sa~TJ=~uJhCsdfHx4DX5ac4Y7axU)!oRFoc3DQqo>M?7)!J?k~PkI*M4{J zX^TMKRzYLpc{xE`;n6|@fA_$Y?tbO!`6Wkt^D|w7I^gLES>NvqG!X6u2b$*>mc5urYB(l(#OZXF?btlL@2m~~UCMdaQv_NtIfs_XQFh#KyWRt>cK?(HDmCl0QT z&!dD)RfWFD%9T&I%-*g&x1kwC5dLKp3Fp=$&w9=WBQ#VW@3`q>d{HD_jcWug=c93O zPoTab8a>Lvl6r57HGsOb18m&d$myOG!|x#nmhO9z_&1@xQbS4QLz?4n5?}E=Z6L6N zisF;50`?83a{OoKFVKzF;AD<@Bw`h-yX$`2**>bpbgG2SC@S-&iu?p=qoQ~HLQ;<1 zbDIjQ7*t_{ltf8iaivHD6@~usm%t?Wuj7Yjq#ZkbXTEm8B0$OS^&7L`Q+8uoib_gp z>83|KQBBdXMy*j0dMxzy>4Rzv^}`$BVIJewo3!0hLt1}jbMMJYshUTr5d`#kg7JZrvvThBO?ssFv@T3y|eJYHo=(V646|qs5C`S@w!g8 z5y-xZm7-y~i@NTuWS0=7#4e75r9*`yt*NDP@%$zH1APkfVeI!KT5iGUuJWXoPLj3U z(Z=UemUU46$VY{=ry50e&*W+}D)!v{3BySMN2Jbb1|HT7+1W+oH15I6A0Ke7q~vT0oF z-ol9;VU#torLje_u|=V6o;o5J+pqrC)HQvh6O3alsX2GOUn-L(`9U&7u~wr^jm=r6&LR|I z*87Ya;jTzqqBztqgZ?^A86>=P5@CJy24Z#xzdio_L@{*g?g_L7m?R0}LYmDHTgs6{ zx!4?jg22`Ekao{&BAAxW-F^m-{M#&9mqMMNwHT&^3a${@xK?FnST8$pVQL{QEcXci zp(q&UJza95g?s)j`&wiJp;C5W^1b{{@A!JjBeA75m{)fd-0GgFdln(p34g3DTnL3q zV@s8`3f8POI9*I+dR$p1$1O`Wv(n?;*-Zpt_&bmONIg+Jlp6id>Rq_Kv3-V6AYO*s z)K$XC`LqjUf!qi1i^qbk)$wcEzDrGFJT6azKC&0y((kcI;U~=WcW(Tt{mmRL_I-u8 z+Ow4<<(VTa*;+hJ@!RPDOP(zpA3& zdsFrL-&>=%haJyH^S1oNtU8REjnW8$(56lpIQD;U>pZj104;D#*&wqS142fjvobY4o3;9K1NLGTM zvt*m<>e9Re?Z&3#wK#KBX7?zjkKx~0GwGA#?;i2245ZkuRQN+Sn06j3oSW=9+&=l1 
zsVr7LaRb_p+U5&8j5;&ZtX`~)OpwaeV(6rNJqWnAhlr#lX_DPh0BzOKb~jqd15M@J z|7E!VZp&SG1^Tikc!)6V5;f9lqVmDH&~*DMcMJeP(db?7_ar zDBC!LxU)f0T;H-Ha~46^T<~`8D^dHfq`Scw)JA_btAo^Xy3LBg=oB`(^O!e*;Bv(r zAvO*^{TkNpQVv(V^6sxvi%Z>cbGO~jWCVte86e$;NpVh7?E zotSTK)~@yFOy;QU<*_2D2A?!%q^7Z1H4pXXh=m%jGFEX^b3CCu4hr7*;69dileUl6 z8>gl3R{&APse8jco^7RSnDcxzY%qW*dM>GGh79+b{$elb+!x6+D~s)DT4{5BB8|#c zkkHwxZ4L*pqF;>1X32@fRjxT!%>!)tv}?$m(YH%k#C~akhP5 zT{<(zru?0q@Hn%NLuIm~>=`2kl&kE{j{XI_vQ*Cs7^gd9td4LMuX<#p7VHeSx9xO| zZO^(olH1N5Tvr|TwwJG)h7T0AVX)g8*U7tBsLfR0vP=yjk*tCO$2S<)HWH-`@6-DF zId=S5I`aVIzj_AAE>I*$PN5{|RzUPifpHX&5DaoFzzum!y0x)fyo6TR%N^RSzMfeC zhsm+WiBZ8u@Xz3OU4`7U3V+e!iYoSnuQy$9!H)@3=TDb^H|aBVKo8*hMXp&-zB~4- z@n^y2YvP0JRMTiP1=7iTYH1L3# zR*+}rDBnbz(Sa_3TDOnZj@PueAY?vlWQxYbZv@gWJlL)iypCk620Zmg+L^`a6J`fP zAxN0Zv_UJVVq5R7`g|-P4;rBuO3tY+T=uT4mRNGFcn#NkAPQ}KeDIfiAD$~rRqx9u zFC@v9s|XOgLRuLwO#_8R#llfq{A3@%qA#3Rrnh7X4l@-FTxk$QLR$$;w4sOUdev^d zh2Q!uW*(%27m%@Qxu$5$GIHqbUL)NGxbJIUGx^tE0!IgZTaLE+kF`j4){6I09z6}5 z){9z_mM0*WR`if>>Odx?rHFAno1kNk`Xk=frn zeTL&yAkY_#r2R3Md!_VWkIyd5s8X>kxA0VHN z@rw`s&A4{e?%eX*n>o512fEobH8lyudQm$OHc^`G24H+wQ-YRY;zh2O2`@CXVb zMxw^}@Ly8St^ds1HOKobW8^^mAFuUBDm$=>HcL+N<-EWeewOL!>x)yfu`8LC9Z)VZub#cn)^NnRRz_;@ecpog4&D% zaq%z1ZozyZ{};e^P$S^V{p;eWzxw>QYwYZ|uL3hM*Q=0qY&X(+b_4Rzx}#ifiEk+| z{mg2bEgV6$9XoW&5E#F7i^6WsqI~Os6vGiIPF}emMKp|A(a?0QZ+2Ve$-l}ZtyM2* z!7qJUd(nq)?}A*y=G7i$b6t~GBzvchGb9Y^@}S$Zv@&Ze$SfL4f}^i5YGSzyj1Kmn z<|x@5n{jf;jS&ztT071EJ8kfkWjMqJ6d1|%UrJ%!v+s1|)uCnO$zuSSfKLUquzMgW~eE|Z%fT3?uk zBQXPZ=?;7o-=9U705p`#;8~v6fuWIaWTfGQ`Odd}$&n|T5my3Bm?{$|)QKy}svXe& z&*5phP`5FkVI<$i%2ix=n!mgI>aOBx2`LiGua35xb)NfxW#kpWonM+fcn{fVe@r zU8CfznT(0ssx43Z_%pEs9^3Ggn0xX2r2{O7GocHh zuL%>}`W%!Q3XRke{seFg*5u0J{MIp#C&eLTCi=KuaihXQC+o|#FqosLhW%WO3Rt+G zy9Gcm2&E4yNU*33N!a*N-=CAe3#4toz3D`ZFXo?*yv!j(6O8PaD1U&8ae3{17LGpS z`XSA9nOGyJ!?pPIh7t6;bYoCqtu%3ng|G?an3X2j_Cgt;niAK!wdFN23fV2i`$qV6 zjCkd0i7w0qABm+kq`sntJVPrU+hb^coV;TBoR`kM>M~9lh#x z1$orVzj84Y=V!^Iv(#ibBa5qr`38?^O_T4wcxr-4&n2FbR_L!E)=WS0824}_nv}1K zNurZJfRQLCp)dBKmBLzOr6}pBm zDS7n`DwuB?R{lSgS?{BwE(RZuy*VL}V`Qa6tWjY**Hzut*7lI9^|`-eOQg}IbfC8U zJb;8wbFuY@6NQ}cyaMtQ>%|-9jvmDRih3B-4*qB6D#^Zx|M-7HbR1N*6=m8t1$DZPzV>|M=I1NWJ*u4;%SB_!$ur7o5-jl-77n0dpo)Mp*HXtxT{W-Qi+MHv{YJXP=LpqwAZcc!v9S1u0g2zBpsJEQf8GiX$y#DP zJ@Q=xaj7b+-8LGI=b|I=5cYs*c^%&QvrRdl6^x#^05@Gk{t5?Tu4!XUz;BrF(9wQBE#O%7M z_67~$Mqd_?KgKieNCtrU9CR~AK!6g7gUay#1P^|Nbo0Mjy+M3IlJBcN7L2gGI?+;= z0abeZ9P`8#7oE1*$2jY@uK@kc_Mu(s0HVUFtAulyF>;WNVl#1Ps~!|^Vr&qI9uOcx z#s;T4Wr4UEGZJcDLMkt91Dt)1VU`eFl?I{YVq5qy3U+6rLAKSB>Gs;p1hkvwvs=M8 z5Hd4@BUT2j97SpV{_ZuWT$G&%Heqx(H2i|%)VGYv(>(AHZ>NpvAlI2)Zg?FbE!O7KH%&6bMJ)cHCNlml%-786)F?ICS# zTFf%!!y|rnleo9z{K0Hh()RKw@if`m&Wyh%xncQ_>z}4eC^FrDv%h@bTmzTbY{Es< zfWD*Fb!e^yo@r}a^q?-O9cCo&{RG$(Y~U^E$ZYQUp&l(5yg(>hRAjq;T+j61{?DSm z4xJ)Dg7xa2x8&cQ&o$2v>uU-nRo_%#j16}??R1*g%N0`)i5h~sEzTjv$fe$h4FCHd zX^bc-JIR+1O@}+%Br#FZ5ac}89U|Xww(CBKQv=N5bU4f|J zh?_HZzu8^^h-dCxr=lBHf*`WOY-vJC{GxH6rmjV$Yt`}by{aUF|51&I2Nva$r(f7zRku2uBdZcUfmtozKVBOfJ>(FkdE5C<6 z^0*>hjuhMmPY}OGQgd6a4p;O+&~b{nvfg+ahOAG9F20+$u58Rm#Vj7)KF7%KcKBCf zh0f&Xd+u|d(0v5W0DOU0IXF1Z{@B|C&vgPIHPjy%gt;+zHw=83g)rN1lJYEn*IS$7 zQC&XRo@$^{mL;;Gfxam+pfYb>jG}wmQoO|t&!z5l#n?2l66&<*iL-`{pV*T658wc` zQJ_mmj1?(KApePzJc}nxyVBL zpRFR&GyBLjSyp+#Qc`%Up%vCm%G?_W3*ZcTQ0A%sqpg47J%zC6n^y+ac^aqfp1>F_ zAeCt~;(?F!4_hOyB>UCWlbxcw#iRa?d^*oqZC<6Pdz`Q)cky%Ui~sZ^Ie+l`nbq~S z^4Jyf>N@ut>K+fq=j#l3?z1(q5ZAT;NB&*--N{1qJ$lNbb7rYhMm4=OGI`nJwgZ0v zZX2i-1dJnpG?r;pH?y{K%KtYK_#cen0HX}8=_+}@0Edppm+u$|6oOVC1)ad`kj)Nc8#lJkrti?x!10n#fu%Y 
zCcZ1dc(5Y6NR!iyhzcCAx{)BveBWAgC!mNlasbEKnpH6FtC#s|B<_@)>BZp$%BB$+O~d`APxYRYgk3VK0~4okGpl8+vOGvd5uv2TLL#3tJNr~@yB zLDXM4p~#Q_hfNqnVgi?xGob%K`h!Tps4;y^fU~UANpLc-r3WFDH4QvD>_XC{l^_M^ zOoj)D=zmKq#T}?KT=N+9LahLz4!{2;bO;XR+7Tv`X_ z+I`|CmWP4NzC8S|!3bMBB5=dCf6icrmyT-Svc~^T#%6t)GGjFF`@%!5cXyx@p+}4{ zW}@OK;Cxv!#+$0@G-g$tFb~Xv=~(i?_MsO^rhN3zcx)mNI?JIBN<=f@x9Je=XJo1G zw$^)be3u{hYD7-!T%|n8Lm2dRm^aci6%yXjsDm01(DXsuYi$#J1e*qCK4|UN%79O{ zf!!O*6~>VkUTD3EY1vqTxj}R3@)IJVwxkR^0Q{c43`t!el}6ot>~lm0Jk}d%T*Che zCR7=2k&mlb73SMbeqMmp7735w`42~bw@^*4;V6S!t8VnjnCo)3(8M96opiZar0j<8 z667cN)e+OJynL5Ih~p@$-bl+`FW)sy&NBnbOhc}d1d|uL;Q_x6o-QxM^j+I23Nl9| zMMH{NMWb7e{#}2+ly(hH(F|ILNIdQn(6LPGA^ZxA+UdZmu)};s>e$mTZcg94N`f&e zhmsH){=a3SxE3`Gh}okaA+HXiUv+xT;R7I8^N*1kvg zTa*miZ4pad?Sx37Kq3DqtO8+quu|v`f>z`Fkr^c4@?(%0zNUV!Jo+0LY^!FD;zawMX5Ox%ljaFH}7W7XNa}#k)QqL zKk{~scA1|%x^=(LB+z$qQXlB5*#gGq?-u6=1LUb&xPs~JylOpuV#9|UhV<)}GkQl5 zNvC>$u^%+E1D~f=EndDjPc^IqXT2QhXETNy54p%c_0uI5#UJe02OsuQ3kNeWuuyIC z_(lb1?Fd2yE-p4c|AgtO7%qDuNZ;I>k1jSq%0PLq?oH(Gb9fzQ0j*`Z<dv9FWMWMM`hEd)6LX#yH5H1V3l`t{~c7%5_UX{<(mGeNbw4~Xwbg$%;m?FZ&S z4BYg;la{l0up`Ks6B+|T82`66oLPiOrDVr}2!0D_eQQMg>(U#f-sbU)oNa78fzF%6 zozJRpb!l<70_)UKAlgFY)@Iqf$_;GYBNSbfJh~-cgV(?RBAF*xpFkoN*h4SDLi@io zaq4#OlLpkMr6kiG%O-y7Q~*I>KFOc-eViq3!ie*ID8UbE{(5KbXAr_Lh*-z?mHue783fiylzsM6j4d&%kFsqw*%{N8U{`hqv1!$KTiD#okz; z+UP&S>ghn}q{lEs8_n=)Ib&ahw5wE1vl9A$dMVL8&1qN>kU>HNR|pN@e{e8VYT67M z<0K~j1jg&w{o0x`@v7bUUN0k=E*B+4NkBKO5K|jg7Sitgx{)1Q9>W|VI?f}|l_z2} zAU7uiFA0Ozm#=|cLVEsbCZv7&6kQTF4muj1`aV+(P4`)VXnNh4Uja);&^y$3D316b0!B#1GPUvIW<=Bi*VsiHBob~)Qy(1~9{ zLa7+ubAGw3G=}%eLiF;7{5;b-F`x=eAbGP3>GX ziYAvITxqfhKG1iia>Y4a`swa7#b9%|OxJFBcp&l`-AuOrRT8h#QQe!H(oXyqRm&e) z!JqfC61K`BO_lH{Cp@IFH{M_48KVKjb zsf^dK^0|QgYeq{^iv{y~+M#S0`Z%EiTuB;AKn%aP00O%(z&2|nrNJ#!Sha>u+SyFn zcu9bl9OXN}$YMS^ByAzm$G28^omEtXC8oHajL0G)W1{P;R7yfo<&imJIQjam+Yl5I z3s?!A!_Dz;SK#nyOt&SKM=ld(7$fDW4Yx^E#np2H9v5fJsR;rX(y1W^@FHN8h~Auh z6N_E@Bom-^|FO?0__RCNC%`4&Qt z@~@4$cZDJu1%fTrcr3JRhR(m#7RPRmn6yGYWl&w2g$lR+CaBiH%z;bKq^Y-j<;}g_ zLZ|6$@2m_Oq}ZK&i@TQ^N`44;IAdw3GRU{gug2ZwP#drAW9XF(?P;*T^B8ySFC+Gs z-DPTXy;m1c0I>jojA3=fr4894p2@$9ruv;O(WQ%5U_|zAE4DINdFf;dXp=z7MB(9h zr{p+kI>4U~a!bC|2?yjHw{~=FhC@4tP{y)O|Lt`vCz!xtH_kvV`ylkf;$ShHn0BJT z73Eq^karco^3ACTj__w1*TBObjOHvBtCKdp}AR0g!S=BJG@`iLO) zQfke3KH{E8R;f{pue3>fTQ$3t?^4w)l7TQ%4aOpCJGi)0gTtkJ{qELMMyjWQr(jY* z)ok2ioYWd~0Rrg!PO?-Wo1P2%r6WJ&X=8?LdeWBkC?*H{_!-<|+V zA*gfo%&I2LWOPQD*JXKJYBE>q3#CQ z9Nn|Cw?F437C^Rg*it7TJxOCnU04p?(o1R2?^uX&7qUAE;-T#SgXYK}%v-b?AsqG; z3P=x&&s`a(F4{BJQBd$Vi#?u2M)^1ne=#w{s2B%Z@5a==I@o@wO>uTdg;h8wKql4g?x&NM(um|iw_1FB{JfU( z;XHD3jR=HYiz_F9E*9eCdgojo2z^Ix+0r`XqnGY2d7jV^F4nALHB2t#x>Up)jt9nW z&3gkasE_7X(6nt1pj5dxmb!E%+i$sNV!wEdLK3HOQ@r@BTJtvt)*A{o&?P}hF~`9R2TRBf%S$WM&%C4|49Y~GHuos*~@ZolzCjO&topH9|;x}yuc%ZwnG3Jjv;m_|q1KyAGJzxP#P>`IDjX4NwjT zs5$GK>ph0L>9|vN!*&{gZVnG890JS^ugZT7BC+b|Px;#y;8jHyE6w~Tl1~anGysM3 zf6rSE6fh=zd2ATTF)hS~Edc5|rJQ0W+0RHx*2d-ZT;uyq23Oh<8#LR|WUk@Y{eR%6}em`aM52t6ZSu*mC zybgrffyVba#4`IgVIX#&Co3pPiQxk66%CN{g!hV(?{uFm0nnH3U-t`bltZv% zcb*n-US1FfmXBa9$RTkipK>8MS<}XSJu|X+f-kU0oW9I8O1`!!N;=gx1Ul3uJtO9% zxk0d*t}ao?Xw;ob7HtAjTBBl|V^57MLS z<{PF6nk=f*jGkA78rC$4+29tAC9h0k55Bm5=fAVgbJRRM9^}$k)|DlL#uf}1sq5rVJO0g zH?xZ3_xaWNY-nnMNb;Lcg3eq=eh#99q>!1O0<6dwB&PBibuNHP2!l@0?mUEpkK2-p zxcElO5oqZAUF3RxAbp1J{B%JE714)-X1;SWJ>wXVm3e3xc_ItCQ;@UzgSHJ@r>=-+ zr%l!)Mb8UwzU8w3VK*h$3o`X_14acZRjCjM`fZ4-jlja0k;Y=>2o9t%Boa)L+AZX* zEo!5AfjpHYZrtnEl+Wyc(6hanp1o`dO7tMvy z-C3EPvQseEHtzMvTAX7fh%O$=nEp_pH@DND9k3K2z2yAE^Id8OO;}_-=cgj|ThK&0j487HB8BD3^_-g#74 zfm3*Z64^O-GA{{+D4cAl)ibO@Nk?5U+V#+?FNE%C-|Z<;$2a8Kzizz}jzmV-=;ri4 
zyb3qJv1fn)hl6tN>#5=v)8dH29SCi{efeIzCIqK-iOYS&n680-+`xsCW*kflp6_;` zli@Wn^GJi}q)&RuJz^V@gvu~N^SmNRxX^}RGB0K#uH-QX@{UKRAtybt*7TqY8v zc|NoWYX`JrU%B%tNy#m)cj3Mud!mUl3JGY90Iwu)dN$w zkdNmcB92yW^b_&8ukMV6;3xuTq zXI|jUIS_E_>Ee-xs4*U6YnVFg_m{dV1fcq)#P_G#nXf_>=6fF}$9xZy>JO@5$1M$Y z_XS=C?&c;Io|+OK+!q6N%#%Z$gl3~Rn4rmjdGke1cK$CL8Wf+IT?B44Y1A}nbTlda zGg*JI_}+Uv(1gOU9L{rgU^02)K=kxiTY?0oVJ&89|}jG{@EauS2^KEpY0aggnG6ZFA3|IwG`VtBSZBW6^qd;`_- z9>t3ZgFv<%RdJJFw$@xXGRgN)AM>mor31w6l_GqdYzG>Ei>C^mP$!LB)O}PRTQ%|6 zCM9?JT)L*zPW*tW^1-BQ0pqi%mv*6_wtU=-7yDA*3BaCQqj}e(OyVv>P)%jowiB(-Y1Dq+JN#PV(xI>8h%Og$Y=4YTv z!5q->WsBkRS3e!uz#xx}xB=7NsY_dtjwN$JuoRm|91w?VNHL`vgr?5uT*dd%X- zrI@t^#=K1*J7v!|QTrcOei-;O?O`tj+bguGIs^ z+mEnnBro6yG1OHzP z;`@9C6RPUIG>2Ghl~?#1c0nT}*%M4g`&vE^MmogH+r7jooM#2WEbSVoe2*J zD`vlF^g~kM`tW>-8ZK4@!0anU=-Gb1?S1i23VQ<43RgpWqb-7q)4s{x-uIoz;gK&M z$D1uM2#*x-6@|)E*|n1ssESbob(z;474>noAVA#^T4ey;#C=Nm@ul z167U)g0W0xyW@@|ll8vlL?dg;Ny^jKZS&woT)5Hz}L#VBOBf}W42oy2>$LnKPnLDSZlViUhe~HJQ z?PG;L#B{F2RB8shbK{4yM;sW;ZA#O6Q!o_~Y;ODoWY6h2iFdC+L$Z zzK8Is(tLG?K+3NaVV}guG6u zBs^F20K=ASMf=YfwSno-1T62i2gnr3&!Ep%_v)zxi!v4u5>^h3QU$K66g3DiPrOu?MRQ?DG;Qb4dQE@SGOd zTXufhoZ?`X`SGLHTK`3Ji{HP(G8M15MrE;_VosB^-w&{am7Djoz#)U0qDL{kS^mL| zN+d%hUj#4mf;7>f7~|mQTA8%?qsU^e!C+bK!l2YAVu0Qk+>?6AiIWY3?6%b z#VvSw3ceva(fgh})%2g0zU<)MDP^@%Eg(}NTzchjw+Ep`9wrZk7+*wSp)joD9EZzI zVJlQ@B?F()M;|DtJe=57xY6KCI+;Dtxwx<5`&%;jzBEXze54fH=NyP>9psj0VoFv{ zHCb9JsJrfnVGu4eR`5~c)I{aoJSoX8mOOWyWN?<9H3RPWXzt?L@MvOI4BXK)LVUat zR16Z1@Z{`(P>YgHzBWY;Wwt4kkWBuQ+rf81Qy>@M7CGF_!=sM94`7Y%7lX_w(q<3u z0#c}<6(#L!VDjvi#I0`x{K9*9s&L3}ryA$j*GjMm0l+J;6QkZN{0713toJ7=!68*? z@?>AQx%NNZm}ozV3|3%1?4Uh-4)y9+{hS7U0QQLk(>ZU|3C@yFI$z5eBz?!jm}znw zt8}M&;?w&bvL5XDV7pI5kjPY9Vr4(oux0dS7WtJ+6lrA;oH^AquTmc1Y~udbSx4_U z|LfdgtY!-ri~+wj0!Z{VCo#$>84Ck?nhdIbNok@yT3m5A9x-aZaGs_0D8l~anzKRa zuZ-DftRqOyVi79B2q$RaLV6WmNMun1WE_<}y%Id91fz~mjqk$N260^+X#j745PrG; za2{JQUIqWEO~+pldj4v%%?J@{d$)YB+Y^<5Fj}=5eh368P@PWohnI!<2}PKCAH0zh z^`Xra{7$n_RG!g@nR|7d(tANQn(n!KJ!9#!(eylIkjOekZZJhI?zl?>C0))eYNUl@ ze5Zcv70psb?|bRH8~keZyYZdaxuBE!qaNUwVCbtz_KIo$9c`cUh~o8q@tE(&X=Jo% z2=T?71gO((Opc@b$&3ZQ3XC;zwH>9dCe+3J7>6`n38_;*08O!wc`SJX;h z9jxfdjUU8jOQS})xs3ehkNgi5j6L#QloSuT7LIE`iSlE4&|CrCb>bKp;bI{=SmuC} zmsTG`J;dhyxb_Pkc0g&+(#pg{1}R)?!?ke7xT^Xgu9;n7+b=l-!= z<58EQ;z%DGNU~8AA~51Q5M*u>Agj;Rsb!Kl^DtW(fINiH#fa$sB#*-`)i<%KICfQf z66E8B6&B%5eKa?Zg(2hO?))@cmYG!8jJ@6ry1w~}q;89HcO%~Y7FEIi?B)-2J?$ol z{o%U5d);?;|E+B_9RHvv^G-dK)3@FJSM5lNWr5-7f7cxre`WcEHgZRu$CH z5e$=Too(z|Nk(h8SYLXUSLDs>Qz#mcZxl$GyXkN5H8sD3kd6DkuB7;@?;~(o4!R=_ zHb*s3Yr;%RXdkQSEab`hj`V(4VB1OlW5K5An7)RkO@GB_Wy6cx5`we)9Kx{S_uBMos`ETN7e*Duhjxo z{T2V8a>qZ~kAF0`f$!8l+J=Wp!7*v{^MNXX19#Ftw{J?|l5m^$8o*h40rCkRxYupmdj?buMBX@V7GCx7jsG`Wu{}m?V0HSHzvJt-4)Jz{@N4GIc|omr@#Vk0tf<|Pge@V( zTM5S}WEv>OF_B@Qa_@sj4pJEDIaM9&b-bcuaE;YtsC#&ER}zN()&cHy)an@tY#-A? 
zC6WQx5I3jHBVeJ;Z|nvx?72|ocpU_1@d{l#37yTSgzaR=rrqSqGp-d!fm{b5cmP0O z*3aS#Ka^-1qrSQQfCl|sxW?{-bpx0q-2cgnyFHvWKRKF6*AWxSowRUi8X5Zfen8hk zj!t!;Ch8WnJ%|rHVO&d0OvhBuCvxx%EopeN={~(*9ukeVck2RLFF+-_Z#EE$wjq!` zf$@j?d34OeHH9a}!50#+yD5yzw+Dd|2VocuY@0b(RzRmMp2*v^XiXW|IsS795N%iP z`pc_OZl=noPl&{@iG_f zz%dHOZ^>0fAL(eIl)i;7uB^y?|CjhLll0?}c=Qc=DXr+vMQcEi+n-?6v`(2}*t-HJ zsbu{3xvx4E9@$a7m^CP9kHd*%@0yDp8Q~9Yg0KaHS~qMHW#pkAx4+*}9J6pq3oxAx3Pl_@zLx1^(d&VLMBOgSu!`xD zSK;l32(hP^Tj&Y9&ei1`8ffq3oFY-Z2xBAq)&5Zq^AI7F$uRIo-(0A7%>+ZTfQAt} zbP3r~Ql`)@kpyJ*ISh@HogH^jxDR%IsF~{sNTA(xeMi*jv5`B4+b+EK*+a&nDObyIA&$Dpe66CmFZ{3Qp8kH;dvCLFtbitQIE59rV#MqE6OZ4CZGXx*pg zL;iw$#?ui~1qQb(`Mzr@$$cA%(3v@Ot{#7R`{qpS-TKJ!j2xV$XDnfANZ0X+5z|R+g0a@-@|EA z`VkioHCA==6$m1_`Ps;;cy;V|8|a;UyZCH~KW0gFtg``MS*nQh2%boCd9mv@Ug3;; zW=r5=h}o1?C8cTxUZXtsqvCN2wBISUjqB~jAb$1Q z@6p{$5iBkPZg3y(55lvR*|LP4)sT}G>~=#iUbEr3Je8LldUeL_9B(zkrz-&jJ9d;0 zCl%*&>~S5}*8Xv?X6G+x+5ZAE2|yw-w9ADm9g*JD*P{-d}PNrSaN{sN>`@`ncWd*5P%ub~gwop~_a>`o{V&kzo1H$o(=~XHS+zXZ&2s zwD8uL zm&$nM?VtRy1Lz0_0w^LWaG&W#Pce=7EHA{gIucuHOsF)|w*-EjmgoCjE3RkDmNyEh z>KXWrZ2Oo!S9A?aN}A^g&<#SVs?b%>Ug4Hr4TInFu2@Ywj~>$_$Q5eU3xw6xv|b5V zRPV#o2`CQNPhMDOTs1{vF25sWQjG2^7PK(h{r;Nd7J+Tu0OIa?wTOI-_pT~8_3XiD z+WhwxEWdLAKsxD2R-At2ZKS4ojonoOLqj?ef%>pDs@N`gio#OkU~wk2#HXPI7tl=w zQo3nc5cHY)1=@;c>;Z5bvGC*XHXiJ~?904gmEvEtk_nt}w+Y$?gs~QF`sel>!bT6o z2$K;w6%P(^@5x!H1K({jji$Vf!_P528_Kos&WD9L8jn z>?&^|H9?dk8)SonCn2t~hi!XuIzp2Gt=6lPIX9XK{if9Fau0rM2dTsj6dVt7rk#;q z)Pcc7!z2O(+V?i#edSM;y_jkhlZ**#M6zNZRMIyD9#R+il5XyNf1tpy`(#(*Y%&WW zAywo0JO+5z@&-(q{R(rd+rs(6Hu1`=C-YS#X*b2Hf{N$f-aR4xcv$3;nVph=kZ@%^9!_ zdqgA-JV!?1KOdfZNECwp?$r1`z$6pitl*RT7)oJP`g6;v>HwHkEZ<%em?zRV*dqgT z?G$JbkWr63$4@eFdkxsr5W#$SbnvQO#6i~K{I@iAev;B(dBaY>?bh^9@JBEmeeL?Z zBq)WsLD#RH{8}7AlR2dd!N-UIA)^?A_@lfaJklK%E`4Vnb2*goaz7D?9W>p?!00uX zjJlsLmE}(*wzth5D@dx`o>jRJakO;K(f66&Hvh-qQAw^k^Kjlfw*P8RPds*qptDMf ztL$bdiMl~i?RfHj*c;W zm<)mN$kRq^7!g^(fXIBZjUKBoB~JJIbKT5@I`c3`4xCpJhC3U-?&BFjV%0Y)>Bjy9}~FI8B&oot+nL)#f2Vcr$UY z)Ns&tjQVdt%-K}JBC)-ut{4zK$)+NeXZJc^nH}f!*ngGM2dlC5065tItNIe2PWPU? zBg)g0A^WFsf=ufa_siN2>$&I4A@i7ws7}xp z6q0U8@f^0G2kkr!N=eUu55>+SU}YlmWcLyzTnGOp z8EVCgHkp~kv7cUZ#G+hjg%Lz?Djm!kGQc%;9hDnbjCz}2z)*e-omhMLy(@xM3>Wcz zBTwU?KO>1R2ujp}Gg4S$>D}2Q`&MGsdP3d=ira1EC9szQT8Xr5*_giZ?{TS~FUJ5* zaarp8)g)(EUmBe?{$xRa)50s8=9ww?Ecxv3259s2KoJ#?Td&ypjnpwfGV{lDStLVA zhcn|aXk|-je{ru&>c~2K)BNdR=jm1rhy%C#;{@I}M#Nx-B6CeBa__*P$XEN_sQWuu z^uQ&ZGZ0;|@UH>N)hpTCyY8jLE+%Dxl?@@b$~D;+4hS^%+iJ}UNRLqd=uX_Sy%->V z+cn*jWSy@1z-0sS;?ESJN^R-BtCTWrcdtT9u`{eA-AM$xyJ`#Rpt(oA{lH4^lWZ3W z(i6h)t>m|z|A`Mw?ew}<{>}Wm6x#Q;8=OXnA?X^clJ(yk^7YV-BBdlQhWlNY!Vw|O zUtq)UYfULKVAw!aav*6WS@p*VfyHF!6ea)5%zUn&tkes`T+Yc5C3d@o%__Ad`}w>B z;JxAxQTyc{^tHk6`hJnQJ~f2oEz|6qeD0VQt549X1Y8^Jn&6S!B};-v19JjCe!B4INPZ@zaGLW2qm zn}t8<@Y~kiw3(7jf*9Wm9V;76^UKhug^+xv&>T+!S_Rn5p&ahTJ3(&Avl7cw34{Y* z^7<7}BrEMaf_UG(5GL(2624e$TUR(e#aKXl%^VODe>nOQ_Ieu%8_GP zeWzpgv;87s)xpEDKypFZA;eX&qL8a^_lNBAg!1A?vg{z`iH@tIdb};nC@bp7d)JYz zu6x*rZm0AGCRQ-V+~W-rCkJ9oUr&UJwVX~g-h|zvc9U~Csj+GGAki@oyy{dLvos?S zOJFL4O;W=AE6>H-0JX8bHEpeTH3ZiVGA2u}QcD_#)WBJ0r6rQKZE~yCO&i0y9}suw zv%glEw&oTk`REHN^`j1RX*c?Hfgo^g%fo77=XJM!-GoHmaElEmbp!1()8oV^_Mc+? 
ze6Y{%Ik)E@ujPSY6@5$QtTe0APaq4Xw}L31m8}z~nxe!Kf93GO0LjEyNN;mmgTMoE z&rd_o=lU!mk_mga3O1^!Uqpdw&9u>TWaQ}rr-Fgqk= zs|{Cdrq=QjY?FyPJmieSn;XO}C_n+S{*uCPwTFb4{>$2fB8(&RBf=|n8$3E4*XuiZ zPq(Cq7n*?nTyit6T~1Nqjz$UDoZw*L&|IZz(yMP`dMi2)GFH!vlL@bxa4;uqMF@dL zlhX4YyDM$7A5B2PghSZZ-OY!@sK>_g#gi)pVv+DEdFVmu;fG4)omf1Z;W(88wY$xV@@zGSY6f z7Kbn;!+^LBBijqodn1T zWI>>vRKhC`gD8>%5wN}w#wJ9l#DnZIy0;EE%5iPAzN+|VF@1sZ%o4HdW$eU|?yu)8 z)oNr*{neUjuwZ{0ciB5e-+m?jl%W4Z!08D++Fl=jj}?Z>t9>aUv7DCaFnij%%7*;o*4iFV@Z3NI&ITe-1DqaD#$^;7h9<^o!_A_r{Tp3Vj$0}A4?=+BpCi2 z#D)^_9MD*f3vuCj1>TmM+tG64A@+KXqyLx^(~nZn3;*1$>SN=jED+C%$jsbP2@NnS zD4$)JFywR?&T=c|^}jL_wY1MoI?QUv#WfPO;74m;R@0TphNcfG^a?klwM@Dc%gW|* zRvhIAS%LpVB#cQ0%mrH?!B&e6q43SGiqJ9d7${^lc@w%{YB zyZJp{J*oKSeuDOIldQhoU=noHii^)f#3CwK9{QGQbkP?^TKU*Kb-w@XOpOo_w;##E zAr5Rnwf~wvgC7lTGjG~VQDgC87=dmZDi6j5-k5ACh5DP3}sZaqE{*%J>0v6kkNV(g%U{iiiOd)WR>0D zLl1o=8^(Gr^o=#VTUY&bJ{Vu~k*)EttcvAjW{KV5E)k$}YrDA_^>c2u)fBNpngV)#>Cp=(+~@vfe!H0NJ2S0QQ5PH< zyUnfcqNaNlFR4gCHOKoVa6EZLnR`EUJpD ze1sG_P<2WM308u3EJHLeA{NR>oc=f>^iyBoP`l0Ncfl7WRj$@LeB_O71m}` z^J0zs(=(aR#(vKPjmsEoxB7qc{oKmIGGoP|O70x=U6WP5eg;bb1fp7VIAgm{ zFu#z}m^E)EbZk}TWcd$`Glx57F{KZvcW`8`lZ=H3sy#ZATK?(M+liuXp;URr`4%Bt z_2QU)g>lF3L1kAy;$ZS3%=;B``!dM@I)E8MpYD0y<_h&7(e%JWgau{PVwwVTnP2S& z3bIwcn=+kXC4oB(YaUiWwruzgkw{v9t;e7wQ3A?;UIe=jBul>MCCU1$rdzh~_XO_} zcAtOF<4tkQti|jqrAD8;OR26-dobIehy9p3L-x_`-496S=McH@IkzD*$pobv^yE>C zqbjt>u7G{aWoaTct$NKp$BnG!y^Yq9_HUszWQ9lOngOcy%ImO4e;&Vo?CxCc3UF-q zG>?pOi`CIwTz?DG>_nN_D8xy`v9=dYAjLYJ<12RR?0U-1@643dOvFMGap9PzE8*AK zXzD)bA~grp%jsS_iIG2J-nM}=d}zsl4w4VPH=J#f<2=kRFusNM34<}^KU4g)JD#Av zL{>P6{*a*6ul|P@R74Vn-uE797#kBk@kzypc>T!U{(Mhixirdd)U%8TNU8i?q8Tp0 z4VxDB7-u!D8H7YeD^_pwF|ow=JV@e7D@I;j@%8m`Iv0}=RXrq32;R7HT*UwoH`~_-N_`@*0)On1EtBV zqvs}D%Lm?>6{vhVJk;H^db6TKTnf$kN_>Je2uGFf+E<@(hiS`YeSd zGv@tRlglRMF+7F#pXX1vcUG*D!N8dV-F@GFPqC-mgBW^p5dC18-`&CqtJIUl+4z;9 zFP`xe+&i2Inb^>txq4YT7_g=T_^5jCsh`jCc|@o}&+QO&&1F-)$3O|rZEz6|P#$Lw zM>L~F`@_U8R2m84*u-0no-aj%&gj5Ur+j|<{(P3WXoe=!6|Zc%IVmg07Rq2fWOjIg z7uxZ#f^NVi<{1ZemiF*hVrSP&JB1#`Eun<^+G&ys)71S1Bu4>+L&J()bvc_Q&JHF< zV)P~E;`L2yH18qLcJoU4RFhm@Jl4gOTAIl89n)KF1Lx4MH3u@6mBock3%IuZU(jMq znDT0;zug7NQ>(-&=~{{&u&s()A7Jo5lzq!Cs1&;DGC`Bs@;Sy$;>mbdKxEi-dLk<9 zuw-VuTSZG@!}O;0w&w%Hf$vrf%ah*;l;f#>IAbH}h z>dzIBpO6tnd)V>neJ6wJ=TYlW*{;3T@@89&hffob{(OOX-Gkg{TzI)Z+En*$)n-Vu z?UL6YJ=XWbY$F;_(GvALBI$~0>u>n?fpoLJX^z6ohbu~l)~%E!rIwBw%-E{hY1Wea zx;+t<61z{gC3ZgOxx}8c*n|}XR;v0|fle?T5jKgN#mxG|i4T;=sZ_jai@(zR;PpjD zP=x|*=b!#>#%K15Y^?lvPU}L;7i;G+PBh2Xu`+l2FV6k@h=Yy=2~Jm_)Kk4HjTokf zJFL0Cd(W?j)M-Y4+(JHFX}$3%_hx=Vj-%q84J%es_gJF%-hH1bXlL>6N-6NDjbFwY z7l_aJ*wOyr=BwDRZi;fwBW~OD5Baely7_S3$39`_ty|K}$Qt4x;ziZ%$(r4nz1_9~ zRMv?8AK@~5!c;I+dn}}rU?@2AQYERt{2wW^I=`)9KhFp$t=&atrDP8L)BP+Rq}aSVI#3$5N`|B>{bwe zWAsi_vD|26wQh!-yA83%R{0&tdA#YB_Y$V|YM*!aWWThz-W`Q>Plm&89Up+Ms|23* zrNO)8F=Ylrv2m6UAKQLv=L*BHLmb+XlDj-$*Ld4R$nwvEz2Po&>DlrtICY4x2I|%nuw@dBX9Y5Icqy~Xkkrz|WPmp2 zw>R9iL5g4ZBqG7Fcf9Z(qFBhN8qTA%q9Q+fUQhpH43gmo#%VcA0R0+Jj{tsX7F z28EUnD%7yPJ@BP~UX3dICdrmp%W1WCm5$CTMj5D2TX#Rh>6C1vzTDmKLATx?P?nGU zlFEISN5IUk8yInhH+DQBG9i~3^H)C_KzeHE-{uhaf#1E^-~EvD83YXcTI()AZx5dB z(AS&plgK13KWg?8@`z_6;Rwz(oS@^PZBFhgAMPHi4IkO046ku43kZ@?n%Pt5irh~7 zzuOE`{bDRwCGwY03fK(y^9mi^n+`Q399G3}y!q zOnQBit<3s4lL^l|&+=bRlIOp)B*{w|ix~~o;^NpO7w!CZx-HBoy27mr`J>fu>(Hl? 
z%!rsYQ}}-%X0WkM2Yq>H=4!jpXMAY-Z2^z?k7>w5yN1qyARA+lqpgmlT{)Pm=TSN< zDIy+Qso4|)@y?WWlD_L^;(~5nM<2H{am1SdN$%%a3{{!jZ+48Mkf`dmZe2Gzgo(+~ z80%xAA&W8nZyHBq`-?=FUaEWrSH9jeJ%sQ+C((AVL^_)9&-@uHR8ui~(sG{;8C5bSw; zuH#5m&F1#5`NZ~n-DUSezU7~J%fmff%Tuk{`mwyj3thirbckn@5B*d*F*YiDw5z@U zYcCBHBo;}Dg|jJ%vlsS?$MQC(y@8#&?|VkvjK)}4%BBq@dBlE9eoyKER{&O~BZl%2m;9~$ zGz#h<2X=%=_K`Fi%d4$?atOWzlBk^fJRthrr>>~|L+;oAbAJeg^g1`ilX(m!NT3G3 zYp3r%xyg>0`sfpq)!EbCB{_-+DKU{}Faidj9B?L6Hs?T0E%8UUoH(djU6zDBK7gM7 zkV>ZNS?3@W#uEsz7`v8Q53aSjK29v10lshEUiq3_i;(gEp;>-UqgnV|x4f_r-qF1L75{WgUPe9_VPO?K?6V=2bZhy{l851{oR`?Y?0(A;YS2jH4@2%shPeTwjJzr$r zbR=3+TLxnGWbkx<<3^hi#Mr!pr3ogy3v;H_n<9uKCin02-Ha#Pqm2P7@u|+~#lvUT z)Fc#X%m0gV000w3KumESkYjGb0!Y6Ylrz5grYQ3Nv7AqQ|6z9|&KFp411?;GwPOG) zlEH6!@z?*pMH+}qI5omgy;#NqxA?SEmD4raWWQe_n+%5XeSQ;P)6Z0Mg)q5&%Wv_wCXP)IDj&h8&od{!EsgB zkjb0PMls~1~L{)SDgaj;<{hi(z)hn8mz28fxz7cdht%cmEQ z2gL-U*H4bY4WXP}H~(G)^fCmCloCv`PdvES6%(p|pY|5bDfck{wAiCRK7n07k3Dr(2A4UH z%U;P8{rfV&mCj7OB;tc5_;kmNe+O}(_&9;!KU3>&Wm1IPo4h87>}7s2;fW(s67SIQo4@FPSm2* zwKf-pe)a#}JLp`dsWC9MlERcp0kB_%{r8AQhE$o*u7D1>qjqNDZVE}Swfdtlm;WBG z;3TL;y;KfXk(GAi5ImviJb}PtE{E1+PeD) zeqZY3cht(^ zxlW(qtRJG6^wgo)D9gIC;8}Q^IRy#=M&PiQL?1-k9b-@>}R>rq93k2=D1!{5^jw*}3 z_&2|S$h!J&bKdfc&SFQb#2ThR261Gx8Y9ZmHi}k^Xr0+_X6*XO8Jc$G|EHZJyu5mk UK}nk4X~h4Amf^K(4Tq@z4`@ENEdT%j diff --git a/doc/source/architecture.png b/doc/source/architecture.png deleted file mode 100644 index a54f873fb11a5010eec2e5322b3207bebabb6c67..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 60234 zcmZ_0Wk8f$_dZOggoKC;a-@+)I*)V+NOz}F14Fk642{x_G)RMV4Im&bJ#;g4NcaEd z9Q{4#{qTO_!!Y;0_gec}Yh5e$guYdh!NYlmgM@^HCnqbZiiCs;LqbCNiG=}tV&q2t z4GGB~Nlx;$y8Fy-I}>3!2q^(sas zJtHE0W7YEb)bH3eR)Bs?kv?T@^C$N21wvfq6)m4u`n=-7W+>Z@*cS52zG5abn{9p# zulA;OEo*r)I~m9x4C(x0WPjRKLII^{GBWX0Bc)T!aaWku)_Htb!BA zyD@a%C(->@CG}dw54qR#Q*7-EvLOyjKR20t-cAYN)qtNl{LvQSUV+WayP{YE86!~C zjBI~7pECaU41KDo6WJdA+k`)7@-5sjImsv~%4|7hXLbh6=<54YNk1*Je5>xyyRsGa zo9=->Q50M}40ngsGxR8*ygM4E&Q3P*bggf$prIe;$`4?i5Fm!{`pyp|DX^5Wrum6S9M18!&F;4}vc_dw0Y-CEWSBEAVU}cfT4nEJ zG=k%2?YH=e`)*0|bFxaKj}oXstu2@tQx1B+aZHqy?F_OV9+i=Pvy=JExwgVLkLp>l ztGExvx!cujVwNb_uk#l;TSS2xZ$HrP%I>#aNd`EXA+r_0Y70yzI#DYQR&2-Bbd8!Y z3ZCFg`KVwFdAEVQW~$m89>MN>r}>F1+Jd{*?Ho#K)}_@E7e5bc!c__v8-Ht%_AfJ> zLvOD}u_ljikGupVr*}seg^2NR`zxE$uV)d{Mk37hdp;4n7(O!W-V$vtf8_nUY&$@M z;IyaJUH7`KVQ!v+DuTU(;6#t?UwDL|nQK44yE$1p)37n4BZxD-muXzCyDI%70g96x zZ>?kcoFw6r$Hxl9SGJ2{-<{vc`s`1VzxtCRr-Jn{n)@`?E+{FS@;^_;d=9{Hl~qDR zHdG5u3FmT@{okhE+=TwsHW0j1!6Srzx*?#Id~s6ze;cA^B`$QxP^F;sSR|0j*?{yZOn^yg^IQ+q&94)r~Fh&#{o8q2}vY7 z@qbLfvHPcYiMKn_8C?L<;ut2T!)9<7LI8A04|%iwCZodW|NJN>hFgc*!G0tG(QAJh zvL$ONt+~a3PF3@&ALi_7ec)h2rr)}IeEsO9d{3x*qLFLkMoN#hfkxecIeMBvN9?KU z2aw?1Y=!C4y~) zS@ySHOtEM}sG2!(W>-}F6C+_mF)X%tM=$z0lko~vCiVZbAcoYI`^Mz+qiH-(Nh?qb zKZVgwp@db{&#=Qc|NEAt$Ylvi-8b?{iQCG9h8J+k(B*;X1d{_G%#FqDJvejwt;o)J zA4=~V`DN#bGR|b8;OqbM!j`B?QWK=g}r#l>TE6-#(`X z=qXfz&3pKp7ga~R-B@bp06;)|_!U(wT=+>=nt<3HwHHi7vdezn`GJ)$yOAfl7wh<_ zMEz0p8ajGx5m-=~J?PaN-9IS&20*#3v4{{;kGbSv-v7O3;I;XRdK*jH%1j4y+4rb~ zK5q2HQLh6je)ucY)4XZ^-pkg~&yICm1{qMAu4H#!d3+`cvj)l>L@LRq2K{eGsX}d= zu6A^GOJ-)IS6oN3_>Qh{Re1TtJdHdloOrcqmBYb|X9~~4P5(ty{j6gYyMx2KS7pW~ zd^`JF?mRfrHlIn2!wy;gbBxfE^{a}Vt@gcxlYu@qs1(Gg?84~>x7b72YEH*1)79I; zd$M1yCwPZ!mA+sdB$1WaPX4SkYUB^#(c>)gml%s1Dk~P)6D|6Vmyy!sZCE%^lww(U zw^P0(rvApMO(E%h4jlZCQaZD2-Xus+=KQ8D)TV3Ba$UtAgj&a9cnB{atmb{J=9tZadT;k!Mh69CgCAu0UdsL1 zIOm_Er|w7sw2fF{0U7E?8Bl6Gw(LsGo_~ef<7u%Hz2VzWe5idaP%SrU`JYz=pOAQ` z*&JBrFRk^2gF|+%T7_zTjsyAhp`{OmXs2nJ<6JK;s;!EqCW^N^#g;?IMzyH(k01M- 
zRcIsF>a6nQjzNUHpzj!Lu8Y0RSXDw;UIIG<;`2l)Np)3!dBf1*u|(xgr9$QTLO8gB zn?j~q6l@8Z4oMpTQ&E^wsFeol*27bvLE1 z_BGlx-o^6*`m7H|WL7Kp1LupInUO{S*Yeiz)O$o>Ht0Xb>G}A7I+0B4#JSXu738PP z(ct_Y60jamk)*P47I7xyyF@(sb3BL&vy+#=*BG|JzfCDH#H|~)^|WQkm+cR?`N%k^ z)Gi{mb>Z4(%fj5-P3386tcFnEH9IaYD}oP}PwzEwQ?Q@*!B#D=o0cD~lV+K50d7-1 z_O&4B_lrh+`4P^x(T`ZRW5Ns!#=y4TZlKn3c6?!Br0WcdfcbtIQv?tLBm1dJ0m^;> zSe0*~*qLK*E(NY5e-D&+eykE8fWzW8^T9Z-5oKIOL9djx{|}VecySaaw+6agO8EPM zA0Of`;Mme_ZfoiE8Io*J)za99I+i0yKum9XoUB`B-@@=mAWO~>8L0oKiI68M@4q`s zA=E)8LFe<8@KMbz5hGpz4@Ub?)O2tDnOCUn;uHOEVP-$%IhZ?29$9UE`mM0<4RLGN z6~fHuJ&gGeDMA!L(VN{+zZRUlHju_qa#rXh7+7WeS)M%)R^mWlK%x%KDe-?}sUkOj zX-t5L-}}A`F7pBpdV#T#zEioL>1pQYih~b;;iE6}3zJ7__9GNu{y2dUw4TJ&FhoS2ofxTLr@YA1%SfvV%Tzwj1E%NfkU^TwJV8=}As_|Siy0rd#u_FCsvA6wz9lbQL~ zRzxv|Q9`(cf!a}+N1DKuM$m>SwAVTI8UJx$4NLq#&&Wz{lF@;sWx3RBEoWJd%<*Dl zGOj$GbY~W0)l}aP$?MKU#YIHu^4$n7UHd5?T~0P$SL%?0sc`phmFl9+O5}okBAl`a zRT7c|d_ccIiYpXw9#2!{@%21Y=JCIn*Mby1b7Wwd z^4VoA4O3CO_nxwxf>1(2qE_N!ow-c=((75T=oWjMdpS3I3a-6{`Kgecd15qM>-9w=WR!eHTyN17Ap=~z8A+-w@2Yn52^v4#grDWC98b( zM4xTqgD26gu8{M7Xxn^|^1*pkIL`fk|G->)T$=aj4`;iW;d27!pZ%rW!~U;EtMw1I zhLu$`an8hmBrIFQw`EzkBYo^`X)#rnA0?c)!|=K%=l4NTKcP*;Gh%?BSA@sJ#26}{ zUjv-TdFoPN*XCsg>ggAZ-}iYvF9~&e{s3&Od9(b$o2bUR8?z}qwh*O?3)CH8%!tLb{qD@v@Qx-&+7sc=V~=SIowVR zZ%Yqc^v!lnY2GpA*i5z2(J>rE*)R<$ceTYMvZA=iZBPa(RJ0s1qDXRAiv`7;89_=N zQ6p(af9xhZhq1b&sqY7TB!bPr^`vp*>v>tyWlalcTj5=7v#YKB^=X&^jitKXrl#o0 z;P~#Yovi$fl`oZoBK?c*f-jmt+WJ|x^oZhX2~}jNhbTxQAli&zjFf+fys*X(NpRsG zM@LO^z$q{a{``1>vbU&6#!FrQ=q|})QS?ID&w%|!WPtb8u%EEw1fNxl2FGF5RIIA1 zYs%updwz+ZX8o9rxh7*{A1j3t)~5zh^WcjN#P@l}GTO#wCDWRL0^H*B>;D{{q)k!9 zXpGgJ7smYyrl{(k01 zi6bCeh;npq`wNeO%cou!5mV6~HG=!#2eJRyAp+WIRY1eEm3}feH{aRny?C9efBiL= zEBH8c%2#XlK5BN7{#y0@68A`HL^qkCbOwP}LiG0`_s7PT3xDl0$@eU}7-9>7u3)v> z-Xqm!-x{6%rF`rlPX1JdUu$Pu)sS7ty{lyWLcPN6riXHC%t*s4nf0pB$-qsI)4XL0 zGaPV5Y4$@UpeYXv3Xr(*bP>l*z$xa5tUT+xHHe8c3nH)ALnMX3OjvAVYDUbP@nyu- z(onq{gbt0+VPXS_>oNb>4cE48Nowy~7Zm(dHzP0HL4m$C=$UX{%~}v)wwZ~Exu>dp zUq<%lL@niu^Y}&;^75<&$Qx#;lMbS~5*CsR+scp% zTE$2Tg4eT5zzk_Cglo0?<^04z>Ww$IcqlxcdYf*&bN3|COiPY_VWNRP`%MMW8}jE_ zouA*n89=W!e>PnxsoF{QY0dA)8WZg`S<1~@6>5^!yNzUF%U}K;ib~u}s7gt076nyl zk6A3Pj!DHsKB<|0KPNC?XPzB!v8D?SO~ht4^cCV^xVGcJx51@86=yk!|FCLTTA|c} zg()^kiD$#?*f*$Q0Nz->E41mq%RlKJiGH&3&CPGh`Sop%1$=h00XEH47`=x4gvxMl z6>QDhr;Fhd5#{zW;-qzon4fnnBq&fJG+x-|mmF<6Z~0=*7VJDLtb$NHFs#o!)4^!n zEP6i;U5V-W`UwH?)+eo|EM<;S-Nq?*1Wxy0MERc#M- z0o#9)`_gDffGj`i>k`{(CMK=moMhpTtb%hk%0g*dWsya#)|TP7L`HPo%->061x;z) z!`a~2FC?+n_;_6Id;HE~&4~W)8svdIzcrJjg<~PkNVQC5^wvjrJMOn96s!80g^5l{ zrr+*IXU|Xe{K!kcLM)XFo=cLVYx*;FRA)tvIKF;z==}n`#)KV5#L$2#S1{)9^YfGS zi$H7U%eci^zvJSZ7*RTooYq@@3UfR0r>PqWu1a0KXWS)Rrn+Jde<`^KoigA4A{q3oS-`?D6C#9yIiS}iHA&nPD!oaiCVmu z&Mx3itsEA)B{){qp-doKtMI=0m30T7S~zu`{t^~%T&*CM$mZ#5eQnM&er>i7Oc^nt&^pBGHN9h|EBLR)^Mf~nlWnI&3>(dYQ$jT!E@Fxe){V(JQ)i`*n#jY;zy4s zu-;%P$3sFA^tNZSke0a6OVTNqR<`W*C?^$%jNc`&FDK;Il`Ox6O{T$(l~7{*Kt+Gt zQSc#9%uK*SX|_dk#s6WaKP3MBc}eYnlvD< zqw|Z1-Jclj7{=~0A=W01ZU+u_X4cK&Xi4dxr(XyFl^sR8_nuZOUxr?DDu(tF0gSug zHDwNs65rCI{p6exUHd38+|2OK%msr`JR|`djPNU)N$_)Qd*#+vqcmFJoupiHnJ(61 zGZpK8CUjSU&2hl^cr9@nOj&E0$IUyr5+f9wC7@sVNJzR`ZO$?_R1owzhw$S14R%G0 zaRa3sbg9G)p!3=2tsmO%Mi#gFjpja2l^<+{wiz&8s@?Fl-I)0We_qV>h~_yweawrj zq|m)Zxq9eJjdciQc%?FeQ5`xqu6c5lX-b`GX1e(hwKp9&z{?=-VmJjs%M4`vxp~oX zlDO3(6mO#zLVLv8Uo)$a$QAVZ0H*Hze~w*%w6 z26*9qLNLvajF}BJ7DuX&K|;7eM#1o^@G39J{A%xFn+9_v~Rzo3Z+J5KMWx1w9VNvbCOz2bFBvv_Dlb}pFg z+j&a0bP5nD1K^n1d!-erT|Trn;__x7z>m0+EvlXFQy{{A`=)q{VRzU{}HPp8Y z9p}o5;<>cCPf}=w`dWhZXLuEEWPfo%1 z=fr2{yc8)rtjL;i_*fnA5_}*&4M{ngRrb%b@ 
zvQfsq^5BsD4-n^#HW7tEI>Lpp_P&_ZpyFy78J>uR;;l>tYHkoReBWteTCmKi`IWbw z{Hz;i70p^8!#e+|B4ti7K#+)xr9LA8I3CBYNL@2(^QJzanVQIV*;Y65oDF=%*HSNg z9B9UJe=>D?rvk}5JpbB!70Aw9ENa(rVRmt;zqWM*zt}*d)@{*ZipyV!u*XfK zO|BxG#-o%4jGbQ}$yn95zCxTmF%&HkWujYub45xMwQ@8a+18_ZyU=;+cN)C^be4g* zV8y_9dP{H$I(BXVa+0$^PwGEpsfw4@4AEhFo)cy^-oDuDnF;{)kDMep6?_h@8_F5y z+6HHved$FF4F#U+`i=pVxixXB=BYaKSH+o^GB?Ls%82+gp@g(#N5Q?6Vx^_e(3^`+ zhnc%3e8l~P{X70Xx&WZ8Xw(Q2!rU-*dx~#`p5{>y$D#?H=Yirn?}tCrMz`ci(pYw) z#K*44mh}%Jq?SjAr`Ang9o<^pY}G}S_iuWQnBZ6L@odjXXx;RSW#)fU8K5nsR9mLJ zKDghYx=3a!_=TzO#4W&c>ioVf#;iB<>O0e&sQYJX+O|dTol#38^WDw|)!k?-JbYpXsfWTn5-;piApqabNQ-6-wNKeama5yxGdX& zKT^Gi9>xHGl=B5l-_5-$ex8j!XAoh;;Q^4SCdtT`aq_f@vMYk^(aFNbhOxw-og1kt z#L)RO9^DY?7VBfZK;Ew}@tz*OEa=UwzojjXi>o`aNAJNY2Q{?q?l+x9rCeXAgmI?w z(ETE`#DCGY>@|EPBYFV~Z6y{pOw^jZ|HRIRP*;`ql<4zUP+BPyx`~uXF}llA^C0Ls zU>l$*_6F#bAHvf%ROZq^=ffVE#md{bbgT=EkO^bAK@)ET)$-8Z0y_# z`QE2?%kLL9D1#bq+R2*~>{~pP5x=rriVT|4)(&VIAc)|#7awoxZ=Z-+&K+PTGU}cl z|G3NO5UH_t7rjHYw(=83qj)Pk#`nu56pQ}#(tIhU6S6eG@MO7-qy5EB8jY|Nl*SV+ z=@$oF*1-?r2ccDykI)KXN54xzYsM{o{udyJ3~$L(_Oyc@iB~s~8pElgE6UFd4KwEo z&WqM=n$>z61PVsOy(otoN^Fg$ipxs&99Q8p;bJ~or!aPJd&ESvNF(HA407&w(xY(Z zZ95%}@ARCjD|z`@y zt*zgYUI4`B9B8uP*A48qVjqa<2Dcnzm?$gM`mMhocLo^Y@VSIxMA~2cd^JQv;JHZa zlpH~oiGT{OZu9hB41S3zDKzv8IbwX5)@iImTp~s~B~zvCgQwUFY}il4W&Lx6%gaty z6Z>y2+N*tqPgx6MZn}z;PU8aPMo#h%qzZnvREYM{CQ~($a({`xr5zZ)8mWfHnnR5JN6l(AbWX#;o?iq z2I2A1+$1U#XY}Xc8Z1G>H+h|=vf4`!pT>7`b)6y?8@%y#5~c%jXZyARmgzJT!c&~M zAfk*p#NvR4opD@evRrDc@LLR7XQB(nW4)k-g`x4I08}oZavq$^6L7V!ZQD>lfd9-x z^mm^?;lsLFma$(06~1l5$<35@2V>;sSj*5mIb0+Kh1{!>HKSR3=AnpzECa#GY z=%oU4Dm|WAq$aMvlun{XD5=qR4+rdrbw_380_VbVDZ$c7Kmid=`!fi zgPqN3!irESnoCJXKEvt&Oh4`EA|=#=VtxF-O$MvuueA&3;hie8Ja|2DIP+^|uKkqG zAt7Sja-6ewq_TbRQBP+~iMv<9}Z(6I^BM{S_z% zNN|=X#v9;=`+9P?Hm&r(7`wUno9XP>wa-=WWy=R0wJm3#)+${4pAGq2Q}4Js&GFK< zD<(nstfXgf`wB90TG$I~?|755CT-JP+P7CI*8_`V?&+#Qgg~@K0lfhFhiyPVHz$j5 z3>*mg>}9pGR&gUeP29fRJRt|31TEO)McUwNZU6j6!OJ*nZo&UqP#UU_20z(;ui+OT zxPz7MEJ*6ETcG-T*PmLGFK#1)_~&LY)IWbRqG|2HaOog$iF(HgD1l@tQZpn``}HJ8 zb1jvc=F~K2iefL3NyArs@;iMxWKAL#f}|;LtEFMVNSuG+)#0!i{^~F@abw9^h7~ft z09ITwBeoqhkn^#I#l3l7-~ub&9h}nwD_Ug8v4Ld^6n{&N9rUh#{>R2=M6j_N_m2ah z56(Uel}Pug*F?nQ7tGM?O=>Z}@o#i3Td4hdpLF%b3D!Q2#{Jt>Ok(RN72vH``Jk;mdfIo;1SdrWlgue18 zBn5v`y2G0Cv0Q)+lmHGa8km-_w8&lhVbBGqC~IgZ**}uv{}dlU@Jsbbz?#vX8@Biw z2pAyV!R0YPNRMtk?EFVsvi@|iFlyxA26qzcAbHY|MgqE172Dlnd~7K8Oz*yk@64)T z=`90QD_v#Sd;amn$ZGiC;S(mj0b9CXuu-d2y^m=&0G6 z^E+LxsI%er{qlHIH!rBnL*JKi!l$Z2OoZH}p~(HYlEjP0LSQC3wxaKO@o=;-Qsq(s zZ?@(yug0F|pmSabCjMth?25W*D%TPcQI@#bQ}$k*dq4m#XeRK z;6sNKwuB#_gJrp)(&d4#)IB48@3v*VA?n33#3=2w;cGr2CgV{S<4E$HV5S9_h5 z`of4lnyuqa>NEWt8EK~_n4P7IW(`UHgjKv(VaWY-)B2PO( zF5vo*xxJG0FFJxk3^3YtG~cnNZF5jaR_dr{44zW#-Vwv1;5we@?&gw7Wc>gHb&)tJ3kau zkn@Ef@s7?e&Mks^SC%UFgbXVi^PLyBjS z!c8dUwRP13qt3CRjRCAFA0Q?@uw>?aKz?zQ7E6BGG)Uh04B|PS#!(W<5`!ZD_@yNc zj9vCi9DgqfZ8`@-pU6L@qJMlgo*N~+Mh^B_G}-+YnCG~q6gg@l&vUmp;<&k+usQ6` zls4;hDDTe9pbFEXV(j{==Vf{tpI(`-g>m;iv)Z)e)T}GW&_)B0&BRcWNzNaU;lPeI z-oEvevR7$_lL2;Idb8lLrZt-TT`!+{tN!y+`WZDagz!(G!fYW&RE$R!$-!NutV#^H zAKkZH(_1y z3lFkcgudCj7|}3KK3bB5lvmQNa>C6^$FykbEmR1XyW;(`z<+(X?uDh7rxTHD^gz%3 zbyvK56KjW8>)UKUZ-G1AgpZLMzO^061TVh_75cv_@f87}lo4@Z07;_E(Lr9cWk-|s zPwhRG_AXBZ7SA9{!xiz`3*vn706?N8LXHuvt$Tx*k8Xx77^&i{t?j?6nGd5^-rN`w z9g^`wUI$rYm&7OqRc~-WA3>fAP8CG;2sqs|8TV|unA}i=k|SF}tz5k(V}OE8;PITA zKM?zul>ySPNeeT{Vx+_N+fE5C6(j#}|4(x*`Nf3{1C=Yq<2#;l4ux;{_&HRVR)fp$ zQ_(}(c%V9a3$}TFpU~jge~%46EBSTSi;?WxdK-N98m{IAO(jzwAdm1%Gs_$U z1^V)v1M8(48kBCA$M|oBb>nuKxrCAZ3yl8c`CjO)%}W(xMPMXv{c~Zjd^={X6N4lp 
z7zjBG@5=NBU%oFX7Z|{)n57enGngLlvQlEYAndvZ=rZgIS5SA-CElYDtDP!7bb3M&jIRxY{?T( zYN{F5^H=R8Xk*o)ZWGUXkt86A(0J(nHp5(24mKd+gqjxY2lQGV4j7R|O@l5oX&7!- z`B$4g(1=hH0_jFpO2Y3nmj*~T(oFu%!%f$Y)HZbRHUvA?k6ywyVKqUdbSc$8J#MGA z)LH*M*bSz>H|?ztlP%gYM*j>&Hjj7bQGJt)!Ma|=Pixp@-lb4-P25Q7{Fr^?SnV|U zVx9-Qflc-h6@3F34fRwWb`&RzTfsgBjWlBIYSoT2;&VIcXzy^oF+@;2ehoJd>pW^0 zs&E@zb_`jdlY_tTtc^c!u-56Z{#_XW;_CN&B(mI9+|FM|uUY!bo0P!ik3hr6%NXep z$+))|1G-*SlY0&-+^%1V<^yuh#8`p^E%>R2{n{4T(gaowe*?2??+r^pZ!NVlf z>mLjGo{y#^@zT}86#$F4vomidQh^!w6bb8p0___da3%{&Lvj7y#3C6q$JPCL)cL^Q z5>jL!Bb|{}lkvGlU|O=`T4*xk)?kcOrr+tXa7ct2qqlOj_p*NX=Kv3MI_>=%H1_R7 z--7lcb@urG#qa@aFauRo8PKt_dpz*Kod{^YE|LdcC9=ufehdHkDKYF-TYa`&KcmPi zG_SBx$15S!^jn`SG`!0q{+1Q_U27ZCdRkl|m;J|%Ff4w9l&NG8uTJofQh_uxs&GG0 z%(JOy|9RUuF1VFo>U6mM2pFvNvdX?2LAKV`3stjcY}az_?ycPgnneXv*ziTN>y>e! zM^jQtnHBkIibAeU7D4q`=E30WxCR6os|oLTptn?%weq(fdJ@4|d~u3-yvcek2c?yGb6aq& zXg4s_=I*Q|Ty`8Cq6SzSqBA+Ce|Z%5aRv{RV$oR4f@>5DOm=*V80aAb)&oe|hlmej zr;A=MVq2bDdfA%|L+L&^Oe{rQd=RNi+EDFzYcC@at zOsorPv2E&{)9`ECOg!w08JiTS!T~-kf!11 zYMF{2Fo?g;Tzp7|1}Pvb(K}=|*&hFZZ?)lc$3VMfCvi`9$7X72yd$d+n50cc1lN{+ zy*a%r1Z?8f!tLn~Sn@yZ7pm0+3YGNq0MB$JDU=SsyVxb}k{MhprW^r&w-5taG`M1X zym8+l&A(+wZI#k9EMvRByQXf_RO^Nezppi5zO4!k2r##?h@JN zDsEHT6A zepUD$@m}~`pwm~hTa@abKyeWby)rWrF1?-Ae`s6TZ!frWk%)6^(Bx0^Q9}3PPsy$4 zQ##4Kj|aQ8iS2S&TqArnQA!wzmL#7bBS3l|^Y8F%N;udmW?3qTc5&WE%B0M{FJ06W zn}Uxo&%I-#5Xlzmuovs@TYaYd6;;wJr<6;%=3L-~aT~F)(@yny`s(MQUdYn}(A{O9 zPl}1u-%dbqXjv9@6LHQxIB1+xTBS9a5(;9|DVz7xheD-lZi~t}Rwm(gE#1AT~kjN3E$Tj==+Sshs0rkB!x&6Do$EDoQ2~=-`=F4Yg zOurU5?NL?@T&G6nFApdPlOf5M?3UMR%zIx$>~qR2GH!<|XW35FJElAX#d@BX<6Ngr zP9p?{`rMIIHuw^^*BKs&#%4k=!NSuZJ7Ac)U)mUvCjiffd?GB(hqX31O@Uk~fSR%l z3j!WBA~GQQ^?%yWfK3l3;go=$c)YB7C9;^g|GDK-6Ky^`&K^=M_j=!|r1Y$D;A{>m z^vzvh$UcOBnr77EITL}oXo}z~zt)^cIDCH}{zPGB18*Yww3^ylQEEt9;BD}u1Pq+zR?mB=g8Ys+*ozn05stAE4S zzm&L;QMPj#7|P^%jFvRm+UHbJjf!tpNebtRR#!gxQN|2A7Ex`U3Hv1&txk0&TjMp0 z5v|T%Bi8h@dU)$hl-_HG#w%(6Z0s|BP*G2o@7yP_s|k&Z(;+2^Q+@Yvkron z>eM;nF5@5E0?8-XOBE>2(gX{6tS1gSoF6Bq)OuY)iN@TCmlUchS=$@`zNf=bA`*>; zC3t)~X+Si?G`%w@ zQ3#Vny*HGphV2^U@KKQPODqp=+GRMNbNo6S4 z+?=bL=)|{qf{gg2?o=d#{lK~mikPQ6NMbh*CEafY}tqC~Tm+{l)~$iM+{;F{G`2|qG=UV5HlzO#ah&* zV+>QSpP}}(Y>UT0;Ps}1Au9P<&0&M(#{JMYiLdOu9)+am6znL?EN$U_8taVr2i^LJ zd&RDaJ1T^8%o5YT0`s8UTzaKNGaHGzrIL{eE;b$4ZUCbNZI1Sc!_B%$UO&Gb!_!p; zcK+G2E05z@kMr{ln=FsA=0)T4Uc|t);3DCnJK2&=ic<{Dc=2is_-Y7Zf%2(-}1Zf+m18so8e3L z_s225k&DF!$0-jTQd0werj(nDyy48I!TH!UoTD^{G-lXC`yhDdCgVKGm&EyWb;}`> zxwX0cp4SJ4bu&rCJ+mGbm8&%xC7Q-sI@?OC(m zfzOfEM+3AFH)OU!NvVT3X%TLh76m&X;93sp7a+=D2W_ai36sBr^;(JFSk4WPi}SjI+u?yGjIx_zjMOYNHZm*U~ZC<8r9gNtOM2j#Q%l}BD) zlcmS^KN9JIVqu6ov9a%S+o#;c4J5jnA*kp{@zbNezPY&!n623s2V{)Wjax-~0&2L=Z0-FXo)6I`zua+@~OV3oP4Jz}%sw_RxLg#lf2CHH_+HbX?cp_dDnn6rI?KK)|PzLOn zMi=%NCrI{SCd2!!ZTvh(XzsEMH$4D(pc+Q;wo(Q)W>GxZ8K=#T)6P^jAnAJvRxpnz zUAo>&>GW_NT&~b@9_%yX3nR-r@}8xfX`XI#xV@?le$&Q#6h$EB zXEVw7Ea*7ClIoqS{n3uv{=2zg@xjZk!FjUt<^5=l%|x!Mq3Pg`3(EF0$pF)pm=OT;l`yb!lNW0oXKp;$Q``%`KZWWhpc11{- zID*A(aofhx;j;uR3c|?ync;fS^;KaQfzbc+^_pKt+TjAvG$@th~V=3LQ5pXVt#qbu>E!?&Ckwz4iqqO zdqlGuR>BOM)w-=cYUTen+oCKwWE6MiFa|OfB@kcUm^v7^RjMW?EnM|ZtaRyrM0yoKAn>dbNkRTTjm{$a3g_3PBj59Ae-o%ZKLu#BE76-;x zaP<>q9(%0ewqUP^mULv_dENMP75^p~xb*dr{S`XHV#9O26%}7CO=GX&H@j2s70?zIzQ3dpYOCt4qaxfi76EaH>E);tb5wyH`x zuQzUV>SE-NXh^KxGAH_dXWS{)e=?`X8T3_YiPy2*PeWB(o9mTV@0c3X3U+ZSge0fa zt(tthr-t4cPCP^2Fk3QnM2H`I5R2|lVsS_;JsjdKKmc#KhwTs8zU>ldlEC0?@pFXIWh(nK?Xr>@+fU7{p5eHmU~>Sy zE{FY{aMUA4SRKhS*yH(=UZcQGuzVyP)s|Dgz-a@s`f~Y9jvVD`gX(iF45qWD1#ob_9*OgD%SKbG~mkGLLQAJ zAVXV(NK^>TaW!4jH{V;6pAAt2im*=K#TTo@B04#_#h8q+9_zjYO1wsrqghdDsB?2f 
zYto2<0E1mR8#pLxN)($j_$r4!H=br*zxjPUD8n(w#>0+19Z$*(lZNE3)l>pTRh}G zb=TmyIeKNT8>`@Tmcsq2GK>pV0is z1YvGQmk-+HuO@B|1KP_1TbG8#MxiaU`n`duwR*Y2zN&QqxG1rC{U1GgB91AIqv@PUx;I$+OZ(pX^@XWg7?Vwd_0 zyb{+k{DAhWz|LYvaY&u&Z&&j?XVo|}!)p7s+`1vCYTO03&Lp&Ep={vEBy7Qbc`#GH zX?0gXQ>X6I^WO)ltSa@5>TiaQ)+O+Xu;!{jN z0y|P@4T{R-*r_gEoS*RYZlKIjE3BKkQ#&P=3ly=Zv^SK;GAMQc9k$iHqg(xC(oyC< zTt#VK%liScJIY@a5;NtC{Cc$=3il zdo;2M_aOwjmQMtXT`sCLGou?po0o5VbHhw^VZer(1%mx4JMb7Mk2o8{MFf&p3S3Q$ zgUK?|H(_C*oq;v8`EV@rdvcH<`0QHV6tGC$EoSCbKlxhcINC~aTyrqs9(kpr5SuRR z0$v10tmoez91l&U2lF4};xZC5`&-0*4ivvHPC9ttMcHjNcl;t;r`azW=DY zH7~6N&?J&pXM>L`+W-BfX7d1`@${Ol0>DQv`#-;A9wUOdP^QA_3EvKSPWe>shG;ZN z2CO;|&!3li~UgBNI}Uq+)r&(% v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -html_logo = '_static/gnocchi-logo.png' - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -html_favicon = '_static/gnocchi-icon.ico' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'gnocchidoc' - -html_theme_options = { - 'logo_only': True, -} - -# Multiversion docs -scv_sort = ('semver',) -scv_show_banner = True -scv_banner_greatest_tag = True -scv_priority = 'branches' -scv_whitelist_branches = ('master', '^stable/(2\.1|2\.2|[3-9]\.)') -scv_whitelist_tags = ("^[2-9]\.",) - -here = os.path.dirname(os.path.realpath(__file__)) -html_static_path_abs = ",".join([os.path.join(here, p) for p in html_static_path]) -# NOTE(sileht): Override some conf for old version. Also, warning as error have -# been enable in version > 3.1. 
-scv_overflow = ("-D", "html_theme=sphinx_rtd_theme",
-                "-D", "html_theme_options.logo_only=True",
-                "-D", "html_logo=gnocchi-logo.png",
-                "-D", "html_favicon=gnocchi-icon.ico",
-                "-D", "html_static_path=%s" % html_static_path_abs)
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
deleted file mode 100644
index 4dcb0b451..000000000
--- a/doc/source/glossary.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-========
-Glossary
-========
-
-.. glossary::
-
-   Resource
-     An entity representing anything in your infrastructure that you will
-     associate metric(s) with. It is identified by a unique ID and can
-     contain attributes.
-
-   Metric
-     An entity storing measures, identified by a UUID. It can be attached to
-     a resource using a name. How a metric stores its measures is defined by
-     the archive policy it is associated with.
-
-   Measure
-     A datapoint tuple composed of a timestamp and a value.
-
-   Archive policy
-     A measure storage policy attached to a metric. It determines how long
-     measures will be kept in a metric and how they will be aggregated.
-
-   Granularity
-     The time between two measures in an aggregated timeseries of a metric.
-
-   Timeseries
-     A list of measures.
-
-   Aggregation method
-     A function used to aggregate multiple measures into one. For example,
-     the `min` aggregation method will aggregate the values of different
-     measures to the minimum value of all the measures in the time range.
diff --git a/doc/source/grafana-screenshot.png b/doc/source/grafana-screenshot.png
deleted file mode 100644
index eff160321972884e6811d4dfa7fe7dd26fac2912..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 82601
[base85-encoded binary payload omitted -- not human-readable]
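Note on the scv_overflow tuple in the deleted conf.py hunk above: it is simply a list of extra command-line arguments that sphinxcontrib-versioning appends to its sphinx-build invocation when rebuilding old branches; each "-D name=value" pair overrides the setting of the same name in conf.py. A minimal sketch of the equivalent direct invocation follows; the helper name and the source/output paths are illustrative assumptions, not part of the patch.

# A minimal sketch (not sphinxcontrib-versioning's actual code) showing how
# the scv_overflow "-D" pairs above map onto a direct sphinx-build call.
# build_with_overrides() and the paths below are illustrative assumptions.
import subprocess

def build_with_overrides(srcdir, outdir, overrides):
    cmd = ["sphinx-build", "-b", "html"]
    for name, value in overrides.items():
        # "-D name=value" overrides the setting of the same name in conf.py.
        cmd += ["-D", "%s=%s" % (name, value)]
    subprocess.check_call(cmd + [srcdir, outdir])

build_with_overrides("doc/source", "doc/build/html", {
    "html_theme": "sphinx_rtd_theme",
    "html_theme_options.logo_only": "True",
    "html_logo": "gnocchi-logo.png",
    "html_favicon": "gnocchi-icon.ico",
})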
zJ^gklgpv?`lW`=n(RlFqw>?0Cb~s~1iCX`Iq+>J1x5rxFFx@}mbECfdLGTCaG``pfO6k5Lb48wL5;R7% z?%uM#4Rd*J8E&PNp%n39ZmY);y%XO4b|vP zUCVIke8s37In;mHPucyof4L85U3}H@M9-V86(Ei~*1x@`a3XWMebR8sbrS!?=aYD6 z9G;>H!OU~d=ePt5m<<@+L2@~d$76RCBWV~31o0KZae9V&`g^WCn~ujv7(X5(jnPy(5`f!32%J- zrc)iL15`3?=~4@}dq(&9)f?0|PH*bXGS`5;5uc@Bym*tmCTFT9pnoHS%CZkChSgRImcIGx z-{cNHCKM!$mj5W9E1xw;tkqo7VEcK`ahhnF$@V=zL7i&dv@QBHXK9;}gyHYn4|U&6 zI)6R|Oq+TTTykG}?1fG_mBiS+w}shayu7kAnDQ={n{=O4oME34EfgwwGwwRRWkao;eyA(!vW+F?wRP>aFrLGt(>P^p`1HPV3l9{xk z-e%wC+ZKBJB)|_rj|jLmyrss$#>v5{C#xliBq<@`#je5L$Dt;Vi!zAfjVy^eA~TOV zjM9i?i}XWoDtSD8ylcFE!S~{Fma*EdS}&kgcDK}h*Iw}Tu&mXQRjSpcRp8dkbBS|< zbE0#>hzp`#j)k@&yKyfp@(9g{ZlV}=LJkCn8^!U8EXp!p-zJf8uCfcYS`6s+r|ueTivE`J3irDCY57BIF>zP#@OD^f zHtNu`=Pm4DM)6B= z^LByueSZ-+H~0~_1#!gll7#&&rTH*)6w>yPkD@N&Eus6PMj~Fq)Z|XxyZB*3Oo1AG z7yUF)?~^1qowv$KJYIKfWUR4tRW>ktGkdI?y}P=*xr=4Bv@0e=<+b@M8pv7{I$uiF;}iajzO-{C$p7zWznKzV{jverIUr9MWcAUILZcA3#%1qrv(K= zSPc0Blzc0HXvT7p3^)(ZrSmXi<5A&D<`Cw*<00e$2{OF>!tte3F?slGSfw=gU6x+% z_mGaQExq`<#hLbjv!Ay7B{mQxyBUiAwWdJ?#3 z3>H9LIb0)LLH5xrOPPgn$?<}u|Jo=(x|_jL8QnAIdI`w@rF(cN)$a!VM8EhF-HoEH5kxaSnBx3!74m3{>H! z$0cpE@<-J74UpOUN=rG*fK95+DD)|Sy~s(`CqtLJZ;PEF@==3b zgIu6faI^8I{oAr#>F+j?{*JH-Fn=7whKnBzfvFLxwp3eXycx0 zDmfqTO^NX03O3qnr*OX6Bg|qWZdqy>HTG!t-AY@ptKYwx)pwj*dEaj1RT$yfl z|CAAm;fcY@900NSl-$?edaXmr(e2SkaeZ-C{E6-l#_{ZSrm1s4?YP@wOdu|w?8_X- z9@~xKqOtd5LzY~j#+XJ*?V}GGt7E86kt-SZLx+{=<~-)zPQgxvD-Mv#+xD&QHNe+( z;JOOa1S2De$X^|yc{hLWHk-V|#l@%%=?&mOSi#E?em}1JHEc9A9v8$4nLkw>_(VYP z$Xgo~MTqDWH>{FzaMX7z^@CNwJ@&_rbzNgIf_jvx#z%Oms1h`5YPhrtAnO@@75xGX zK@@#`g;|hv!g~k)(l=5WOk0yvQ+_xo&W9-e1jl@ZYM^oqohhB?c{1n!_WDWlbcrj z84V4MsH=sQu=?w_|L%_bB}V(+-Q8K3i_6Q)i_?pb)5+DEi$_REh>M$-iyL^Y97^i2l{#|7!Ys%71j# z{ky9G-~aUdN6Wu^igNwo;2#|QOI&|FMaoP3nJCwPWiS3rd_A-nz_3JFUBOlkFzaDl)FP#wRZ-!5Ep&V{9>p5XToWpeM^)?~ z(dYK=dsA}i>gx1#bb+z9fOagH^2waUzo(y;fK8X{YlpWi)Y-9RU*}RI2DrXsC-n+{ z=LCn*>sT23`?uCQF6%AS+Rz;?)Y0YX1s*?cCrAMQdnxL{!N%PoI9i@YUxhv3N&ZJa zn!hWTrBgE&%f^np9+wGYgDL-R=`?@xZ$W5Eu7*BeGKQ}amnaIVR;vVhm@TuPGZ13R zldyPWuyDdpmVfhn`nTXpQcnC1s`@wX?~bpT56KV*oK4&P`2-MeoK_-`rqrs8?1{vx z%)dF&QiMtV$es^jQWMN|p^R(4+%Q>0-%R#u8AGv$*QB}TuX9=aFt1x44V+zT1P67+ z;5`HVdqc8M*m8M|R8j-@kKm9T#MmZv8#VFnWn(K9;(P6Vp`NQ(6!p(B+Zk0+o&A2^ z`unysNIaF6*@=9Qbe|i)sMT#%YqBZ~AGx4^ujK{!(G#VY+!dLE^Ve9?OKo^J3O6#h z1ZN#TW5V;qI6IW!b!84N9S4qxZwGeg-$a*Bf%wuG_c1((EzCvcGL_5Q+nY`M@jl&QZNeH;>PL4WKUm0Hi$0J=tHVD~%)ZVtq3s~4F%CF9kmTH=>Nj4 z|G0h=XYvBfrd^TH)g=`pacjE5qEX=emH4lzJHCuF=4E4tRW9LV6)1kCU2gVRuS)rs za;STD9V1W2S#jx6m@g2qlh zxVs)2U+>S;S62^qbUNP}u{=xoX{#^B<2y`ZGhL$f>xkfKe8fukZ(yL$D1A^Yt2+YZ zJN!PC$F3kU>y_l~^)VL^2#mx5($jpC;bH0v znxzIDKI}Tm-A$`7Je+3zoI|_*nUn+=IF8>hv7#I}>V^gEOCKe_^LmoWw0ECZ5LxV1 z39qRdwtmZK5}&oa>1kw-MedEqrjX2;c--+|M8vLxJWsOx#9-5MvcZRga$v`zk;V+} zz0WTPXhxHp<=rSHuji5usf7h%t?vmjLEGI$Tr1q4l*cuw*+-|5g2>K~w_!={v zCJUm3XMU$lmg&|Evcb6>jYiV=4fFI@2tXS1-Vat5r-*6h{zOG{#rFHlDJVn$rebd2 zK8|z5!HDTc^2ynD^3FnT2UPsPw?C6)ynJr%mdR~S(E;#<>#ZpH_BhP0v>Y`@nBT;i zRUfw;x5r}=FlySZUc=AFuJ@DWuq}IBul-|f+zvNOaY)19wK@?6Jx!0*I=JR@4aE0`C7{wcH=4oLxTNn7kFnC z8bliJo&4%-ks4jU>k8?}e%QuratAJV^$nUh5JGhl1Rsapsh~jwsZ{KYUanC0U3Wil zP!}=Ct}jXaaqe+FP*yYYzO^{JHF73gOc_5 zmy$2d78|A2uWaT8=b2|qju_hXV2FMQJhZ33UjwEF(-K)FULl(g03|Is#>lVtMQyC} zdkv+*fB!t2JS%fq?|V)~Rl)h9^{4mhXx z{u^|`#aESU#~Dio8n^k^hx;C1;~9xby2Y?vd~oN5UmdS8Qa^WDxUiXMR zuT`^G;E?-m* zqZHj4#J5M)%$#9i_?(k*%Y0B0R%T&gy_}pJ>{UU--a(tPigwZf%f+k1B`6LpHc&tF zELZz@rP>Q>bV+({;D`_IOG)Z%je@m0-om#$Zr==7p`!%N1iKIuXFy@lAUl z&g@jk*`3R!g=7z6;&MDowqNEb&R=?~Z3&9^Q6p-TiR`hnFRD=1)%U5k$|C>9eBd+V zod-6X3l;u|wyR|?XnB2Ne{H%mlj9@T~0f{vTa+wQ)WT#n+}#+v=Q?5%Z%*T!+c{!haswOu-`^g?D7JVI`2BK 
zOj^FT6B~I;_UeqeUF@jw17^Gq7x-^3CRL`-!^k(bt+N6cA-9uR8-g7-QO|o`fT@%R zfH)ibg|oPu0n|y_$#BhkmmL{i2E^*K zWb4M=31UnvA`VU6rsgT)#c~C-+Jn*}EZuzpP@PYFZeGv%ObQ)=SyvAxA>F5_h}RT! z7fL{h8%L-0f^W>{Cf``&W+s32ph68Z^)Z2#m#9IBQ&XBkCI;K9txhA&-QRJP`)P-h z=Qz}&2=lY-mYeJJ&>q%F%K1v-mLq+ZEw#&an$)B4?b<%}`o{7UrbA6BJwKvW6;!xH zn%l_B%LiLJuLta&)~*pex1Si%f~_KU*?sM60t~~LMzK(abc3n;yn9_6E2dmNHBOH; z5r7BSm%L0mC`A2bzXp~~JRXu(HzVsQzPwad*RU90d&9Zk(`0kd=(6Vo2hsR_umDgL zS-5NujM`!^6bd+~o0-J`TSIWk_XHfr4Rb9o)A(8GFhRe71W;C{?+Eg&g`#?g`f*k4 zW9bfoAN>2bQRGu+DS$5{yB-t1`Kjxyjyx5oRc+o>kI}!7n&F;G+KV1a&ry}3ALE=a zT9#SC!dCa%%(LcdeQKW9c(;eTEVp&eDMyypTg&F{lsiJQN?nk#S7mVxRh(K&$H@mhBHq#4d$C^P?!uih)T$p|(+mFC zi;*@k$F-W^Z(aN;MBtVBMO?Ctr12hOTw)&9E}6>j?*jjPEb{tK06lUK5e-r`VL4+t8&_ zBR(tzShqjQzLp24`$c#h^a_|RdIKfrwUa70V@|p|Ir;cBT%!W33FP{|-Ob$iUG5=W zJw>+gpz$sMMEW^mu>+CbL&n76)de7yz%zAfJq;nd2ACc1S9T;hbs*lYt*uRoQU_eb zrBx)$@!`U6jX$|mu|-8F!lk!>Mpz6(Fbd#US(j7uDHiL!VVkek2)g*q z1_8spb5)R!QNX1Z19LLFE-gV+v>aX@p^Yt?dtYDV~Ixk?OAU~%Vw$& zh>!Q4Oz8eu;dU@y5JkrHak30iImgqZ~UMMq89St8Y%2bk@8N5ma3PDxS&V@a-N`{qg-!|S0R%thXkfK1l-Pe{6f zI>A84BbSV<29x&NswZZ&LiuKcAAoAkts$-7trvtaLH2xG)Mjk6brEw6eQm*ptdbad zV{M4YQ=`-7qbG!BffMQ&+6Hu{Muuk3)N=aCdM^35`pv~O(XSr)h5UX(7eO^2YFmW8 zu4j_!q7*)1=KpR^V1V-FgH*$>VQ{Mat5^IvQW-SP-=klxhrS6j^e-4V>T#DPAHcE@ z=_6YValxx{L@dZY{s9hTG{iHFwNIfQ68gEj)SBaJYfo_H#gGf>rT)7M|CK9e_Q6qV~sz>>l;`Ge{x`lNq^@Fq6AiROMzg&V+1ev(^Gwa_zZXC~a9jOXKh9P5K;gIC(#L!6rb zO8j@g+Igpc7 z0V)ike3vGwsQ?yi_y~7#k-q7_WwKPclz!HtPai z27RHM2MmD(#91H$TZrYZ+lkxpmWV!-mpxFr>-)yJeY5nr#z3UQWjsc`N{p=&Os!0V zXMksY6a;k}6D!P%=Vt?aI)OeQpq7jE4tqTG3&!_n#2yoU>%myWth!HALn6gQL(g!_ zh*N!(7lU^Ps%x+$EnZ6I=>|g+*|eu_%NK(t4B$fo%6Z<{H(q;FEXf{}ubd?;$8>|U z5SKJ-CjjC|J=m_I#J+z75r_I%43GTdI?>)M{Y{p}fV=uZ-l;0dkd58R!icu5hx_A) zW(*L#`}|fYL3|BLwmz%9U+o<95MsRzer>mBmP6K^jdSLG3iF99*k)UIj zi(AEPghzdY1zw4HlZzn+&rYoaGD3YfFQ*674MyLH9d!>Za`qZ!1<+_XxAuANTz|Hw z=y}{V%nGl??u3)9bx!&oc8!4@1H(STe%J{aN=y8fG}7t6`DD`F^!+_~D2of$O+n#0 zUkH5=vokAZrVjVdH!w+N$@ez%DC0Syc_^fBCmsdGYB17SR3L-tmnF{_N%yt$heBTU zk%Y`#NA43K7E&rU6EdT(>-w_kDJRPq+Ptwz(XxZLJr2G*(Hbd#s@7K*WN0q03`Gcy ze6g_#5DR-_U#Yc18!;^m=rcXYnl_H=3`3AF<|s_z>$gERHn&MrlOiZV=3&U7r$2o% zHg2)M5&G*cre(Ir`qON!RfJU7D&Gl1`#_5HRgMQF=tpou_a*QO3-0hWdOd^)oQb^I z=(*|ZjI@)BIQMFLs<5)?#QFBYLF;G*(y*I5A#*CPiAq|_tjTL=^6=s3xs?x?pqKqc zYPsGG1{?=ckbnmG*h0uzs7KTrPyWQj;tpokrZ&D1Xk*3o5L7@gDfcGk)idd@r^NFo zg=fpvXlJt=eZE(M@w&^@?rfyf`{av34(m@}Up``zvyhjBWe-;c2)Tc}kb!NIpvxsB zHnWYt2_cua@JL$HP!aock9CXYXLRLK^n=qzR=twS?!tiRs|d z=oX^w-lsVfLawjFg*X~wEkiHYX6XtKyM)X4zsKZmJ7R+j=CYTrX3F(#eIolmZL2Xx z%!+{FjjWf!s_tanV`%~Cs%w#u<=vs zA;V)OFt-q`%w2MPuYBz{g4pL%Tz#F@BRlU!0{Yw=zLh;WE~E{n=43y`60A18=%s7E z&>LfD#A`^pquqZ@+9NvqTx#%H*b|FmT_il}rWP-95gFd2s~eh?W>pFSy{ ztm2YP`exvE^1E|SM#^hm_RXE%m*+{ZppwHT(5`cIT%wX{t9{t`g0oLx@iO4;s!r93#6;R*hpgi3qc9%ln zY`+KQOw<=c3mmDmHQ`5QhC^=tGCU)OSLE&F15<%C2ewPOJJ%b#{8bd<`geD;F>SrX zU1JL$P=|nSxOXqxBf@ZA*LXDRZAG7)*Dfp55RlCJ&OMw^)t-1Xn zmUu8B$DMm$`Q-vpR&L8W6)z7@j0TZQB0iXMZG&tyf1wZf$Uyww0}oEY2uNY$7kcN}Ay7&KgZGFL`cm0zd^PbQQ&m zA*xcl%z4!GvbqW&NKh8jd?Bgz61KTf!l-Zhthl3*Tu@dWoUp3q8EC1;?>1Cjvkf=W zt*~aBJA|^DE$(UPgt4gC!5q(}@KHS$!*^Kd7wfxPj<@sSS}@ZVRkAy-hchl){f?V0 za{jA79@e*rNO60tdsFe&`wx#$MFkFjbpblcY*s_7&NAvrbB_9=$XPX>KV&pBt|NX^ zuTWFbq2gyf;$;J3#);h=wHy->fJw4`HB-rI+qE99GpNIE7!lxxwt@|M@x50#%f48* zq7<54Di3G7HnC7GyLaj4zn^pq(6u}fd=%@W{5#od*f_3B#aPywEMH{CIMO9scs%xm zM8EI0b+l~N1R4}uJuLcmD-(fEHNe`kGoAxb2dwo9ka$9wiJ@BU4u`Jk-Pftq$}4D*ax8a-#RxTDynr@E-ckqE#Y z(G>xlxu5~vjD??Lnh{=*S{b$LIX{zY4Y>4e#{dmbwYCR{zr1$+QU{$8k?hO_M;Nd-Wx2cixz7k;dp`8ssrXeTC0-hvj 
zBHqnVvDMk~QQySx)t04pP6b~U#Gv%;4IR7{Q7FCk3vn`0{t(12>zWsdCL*Cce)#x#x*%PjAJ!DGOSMSzfNEeAJcE1X_ducKH zCkDmtu@)I-hWrp%6NE(B;JomElTVL;XRlAHGrb4sZ6TVs3w!779(OFMr)3ipVhc3!-(6r zW$U#qOx*qXcOQXec%#qkddCg&j#y~438knK!0zV;FVMK$=f=EE&O5)$+=+qJ^VlVZ`8_>LdbbW#j?JW{R{S zv>wxQV}h$=+XRgb3VYIzj>lRk=jVGHTD;b)Km z$g!^w$b0ipX7O1Nuavg1)*O7Cw>AE?AA}c3(SMmyINQTdu~Mpam0L!Xn%dqo8-LP$ zimFaNYdEuu=NIdVjG*tutFmZtZ{2!debj?xpJ8X^Udq^t76xZ^WYr+%UMLzSA-ykO z`?}BWaFkr~$OM^9)8LnfvY0K}_wL!L@Jlnk^@lCmvETs(yS<+T9qi9NKozIbj3(7Q z#|r!VL&wpAaSv_>a!s{G6}3+A_#89}uR%y5Y1c8ep#I>4QCG7R1Jf$Fp({p7g!zvK zYdu}Hc1U^TW5Kce1{JGtSTyo3bXWxOij7TyBW6OwqI`Ed&NOtJ<`MAek^?cgT)Hnk zz+WVT=6>wq25G{(pEBNO=gkXKS5wxVbj#uBx^;&Fyi(D-@E^EoDnVUkB1+@bY=)f^lp;S+%CgfKups|^z*_BK0!U9uIBaGRtFlT4%aR5tM| z{c#C?>t(Ff0Z`i7AfyxXgz z5DQ_Z^Z0VuIMDT!n$vZtWj~p#YizkSr80*PexY=pDU#|xaP*@$<#KIAPyEwZZjQMk zMsc$VjUPpzGrTs?9FQhTqwr*&4-Y@q=XQ(2+;&xH>GyH_B(S0AXUIdBN>*TOIO?Nt z>I53{xQ~HuaqaUVPdcMSqNA7@g2E!S&D=8IqLB$!k9-!ryv4qTe?F|Y zDyCf-AnCc5Gamb0)5Uv~rfWj#51`p{uOSKJ^N(q5W4xl4b&t#~B+DWl%WjG|N;>3d z&kV|PR77133y>E~mTaYgw<4oVd!}mEx?f!`Zxn3oMa231JySxiKU2T+Kr2Toc+12& zve4O_M-3YO?Hx6S3-uDC~BQD8%bcJcc>IN}EHWUHl+vZSrZ?j?J&DTu1Ebw5Dv&$G0wzIogrvn37k!@DkrDAj1E z6i+f59Hc`iHVCVq0;_5IR7A-lS5 z%}2a`7a_TGD)!?>s-K8>rcvjOOxI?gw~jB0ZKgIRaFYk4Rm7QnLRaT%vwVc+!2H9_ z*_@4j>}DZ3+gx%lPVdh=mkGb#SRZ8S#3o( zEo+%H%OdCVjQk?u+v0pb!;qa4jV;lNW&}7x-Q2k1{CS1VR|eTAireGqTj;wo)%}1i z6~<9GhNe_clZgc4Dp}AFo znc4EFJTd|qj#rzz{Er6f)jXqib%>Tfo;Jar8>@7w2;O(zM~vYvh9JQ0z(<$c+cqs< z(>zT;o$Vid?yLzdLo0M!X5bV+;!{2Eb`@W0kG7KIPQ*<{ASnjOU;7MwyL4aAtN-!A zy57>e9#1sI{}!s62I?0;#Xzn@-oe5Zdj^Q_ICEzJi{|~Uyu!`-Q1)5OwSKV;0Hibd z<`%f9^58IXFD=A%|B%n~P=1K>S{&H}%&&QWrdKq8UcNbZ*33C%vo}?oi_~uXe`)uZ zlkVWYtosWl;?+WO$DOJZ)pyT$-jBVU!~vZE&iA)}B@#c(R2n18S|{`0+zHhcI%aCd z#7py1>hg%A1h~#;xX{k?C}*kRIqdVaa}J*R`^=$z+{})2zyn;=- zO#}rWVUZa#;r1+s?eC4zq=xjsyUHiFv0A|uBQvALNA;w$j_*248LIDH^u5C08Tv;J zcjME^5cvTP*>zo4f~qI4*7-X9F6u=sh36-@cEhz& z4T)lune%J~$XZ+A?5|^6F!yb~&R2fpX#FF|IQMOorW-e>=ncN+?U zG5SN$6-O3G0G%eH&8Dw%GqWWkEnrFOJd|A=UilDmQ0h@=(?8G-7^H^U6wjOzo3}xH z`84mHMS5bv)rxQkhnLzX&Rz$3a*4K2;1hl-fTqJu&Sfqk8JdY10xG#9ozV~*% z1BJR0_|0>J!vmMPQKs^j8Cx0qR3$$*7v4BmPj>ynwCCF>se=)Mf^?FDv7mdcZF6%dpb}x$6s3*Y_xoOoN7B!X-lX*CZbBcK6SeyS|qhmA*`EWekt{%EACOl$(C7(BXBvgEswhkS)jL8D#ft!E4!rnA|hgYyyBoB$}9(Sa%XA zdrFVsj;2DIoE%M*^{J+G;z@T*X5dE@OLFFbiV`Ie+1}*br+zaOHMbbRyuGYk62QYR zhWr7OuDJ#_?Mn4!2&sbELo|@*$vRt32C?~voQuVUp4?^%>ra+`k6GbV5V%XRVN3lY z8X2gqFr?P*N~5WIn)xT9H3~i->j~lSTMH~}X>DrH>wkzBRW0-`g!UuDZvu32dSmkW zPiN@@XXv>J+Tu1GPUPmOjw$zcx}sb7i8o$e99>Y%P3G+OzhqhB(e&L ztCF2&rsHyWOIh3wdd$bDB>{U>SZ$QBbw9`3z7nsp#Nk9j=^d9HTD=6y-IyJixU1Bb z+DPLOhlbe1$aj7-9++x19-Ntv8N|9HCr$kjLh_RL?tYQv0Z@GsQ944;-qU6z!$?-o5Lz(7O1ixztxF{9&_QB%k0Waz2yA8cg9aG1{L{b(Iptf~qT{ZvPk9 z2wW}9&ni>|88^1PQ?oW!Ij5 z#zrC#w=CVV^*;bVB!(~{{si@X&d1RvkG(0(l!I<0rcz*s=5K<$jNmLZ%LahhMeJ0C zq5e_xK4%95by8=w4!VLj=D(qqNSs`;$*MKS`5RBF9?CR_rGV>Y_0o47#kw^Gtca&D z@-b7g>KksZB-0ztzq8R0LZ$oRi7WqKpS`VD*^VH~ss!zCgdnHO;muyhoKVO^+s_6lnC(v#ks<6+qG)v86HZVaaOW9wPJygcJ;TBZRQC4Rop{sB zOiTOepBvT1)ff!D*qvnH=hvE;n9$6PRZ16Nx$)W>CiU5flVOJc`TVU!tJ>d-GX3HPn`}FjbA+4h|nEDmuzO@xujNV#DCjOa8uRGDH z2Z)XRfr5@pv5}G3ZoaazVSgYf;_wscn-M24T{5SkdZj5bIG#~SJ-<5zT{j4cnPy{z zhhUTD%sia-Qx73aX$(9(tWSxI|;3(-Dox|txX3D zbqa~R>o?6F`-4bm_U5#~`K9Hc_7d5W2$x?=6Vmn9J=e#mHb`aA{q)az(T zh~eP*^XJ1iG-oGEtv=@#zrIOUJ3+<&xgQC}z7Qn4Pp`$NVC3^TP z^FHi4aSojF{}(PQdDoO+Nc$tN^dVu)JI*c@AkVQ z668E|C3V||ge=Kl`79c7z)$oB&L+(WtA2>amd|)li~G;L0Jpbl|HB5v%5%b1%@%5` zRi-bx!jW#jf~KZs_;w}6svsEd&2MPkquW5aW`~5G+WN*&i4B(tCNwIi^7guviC}af z5uiKR|-_T@Cu?K=kt6#$JtJBsGxk@$+~Idu_QjwXYcNkpZjl&C)%77X-i6(vwTU 
zd#;-6IR^QFBSUjDSSrCEqf--~n7BEa;*Ugnv;L((NgVnG^7@zN38O}>-fD>xQl7x| zQPy8+{B{L>UEPgd-c<9R%vd4LZnL%0SRL=m^-{Y4=IiqfPtA{aZ@xz(QC9VHqOl<< zg~$@WxC2)ZROGmIy&AXdZnrRtv)<7k_ucCDVVnA2Cf||?E|GQ7zEz7X%?T<`T^nGncqApbp+Eq=4Q)>!Vc&Ck5<_JGQ8>nT`Ja~a-FuB3<2*Q?#rcA ztmMy_9Z%MqP4=S=S=pTLD%vIrokD!u8{&HFAAaNWKlGNWlpeH<_QlfjS&wN!q`?kL ztyKJx~AN)h`IP@7a3UfG8d|qex{lij>>l?*z{K8dRRd_bNi?FGx)Z8eaQqJ z%L**7_PJWF2}@?7Z5^s3rOD*pOmui=1dHyggOlr zz1(BL_kdsMUf#wQbia+fXsm=i*H+;*A4fUjtzX2mNCp@ z4?1pr8nKap53ulG6VtucWm-j+{pnN1yxJE+GwNq2{jOd}i^d>`D?g4|FDefjAuq=l zU`}?O??0G6qPu8)xf#-j5x#g)WV53j(XS$QnP98FO27Y|3Zo@gCPtoo{nLo>Se@PM zFt3quFm?47(%aqPrLNxn3+yOJW+SoXakMmVnJMJ9Rb(%2@Y-+D2nnJ3G%a_)pFZPx3pDX{{UVJ_84`CL?ysK!KBzvNq2Rma!hYpJEf=0{rkKF1~^4SzPl}` z_(zqtkoXXQVc0agDB^yIo8NVcKSj9v6DIcs9MzxW#?+u$lrv#e@2C*%8k>IKmM1o7Cv~gP$d}&h?G-Mh+kSVU|2qB)f(k6 zY}WM1j%wnoN9SE}Txl)dl1|opSuHWoF1(OahL-Atyv9pfIC|3lBy#q$a~);Y&B&-< z_8oV-?bMg1d$*nqvh7`(o10Gu)2}^IYjoX|&#EU@63z&so+)p+qcz7P-q?8ozuC@8 z?*dr1to-LNdv;Zt3{KI0{352RM>dNWtAocjAb_mVV&E!H&RY!(cU^q-yDIKiW+>A$ z0lJk&Am5Y&%hoDTTa>b7^R~(sef#`-qRu+;z;*B}(4+^>#-STtIBKZ5h=v*3f4JzS zI)RlM)>uEDgTCLB;u|Y+xi@_+2Rjt;)^R}pK;Pw7<>E2{viopLevCgEn=fcOVL{4k z-?-=c{#EY583bopt`%0Ufr_E>m9*!J}{9mK(LK7%;8HY(H;dASJbm zpN5i1SIFWCsCPX(Zt*!MlF!i_QsOC~swd^~_g%K5~Ag5{Pi%3WiVSH6B@D`o><{&n<_VU7*WwCd>R=-ODb z86PzwKg6wv+sdpl#86Huj7PN3eL%!wde`TVgEHUbRw#N8TL)u;TJMopj%GZrV)v_0 zd{)EGx|~zYIGwlc_qTTZfDI~o>Y%Tci~IC&6y9s@D`eK$M@?MRevUhN802)E_?L^w zf_yz-xvn%7m|v5u+J!77#+naw1$s-}RHCKorW8sBqEQ7?zfKg{&E=bsiaHGpnD=Zs zk;Y`BA|@l%nz?bmqIi-|Bo^zS0(;e-VT3H|`BEh?tP9C9C7{spDJCD%PO=L^zO(#4 zyuD>qoLkc-9K{0!lHl$h+}+(ZNPx!O9l}9_yG!sOf#43oEx5b8djk!8caw9T_slcj ztob*8vRHxc-gi~)lB?>frdkYNg|!Qi`N?_yA!*h$N`1=pa2$s&qs`suFIj9OQf)9$hPx5IB3Yfa$R0~v8(q}SR^ig9)6CD=5WqyN8Jb1 z^^_VXMdPjLb!&chiB^2x*hy~$HzZAMl^X8RA16T8(9&dL!$xXB@;C$~?O2{A7hRg~Fy(C(c# zW#-fXVWkXW>t^xP_g3Wv39j_RpvzhQoI3C;R(T-<_T*aIXsQ=K?0<*w+g-jRk!KWR zOTVuaPo~YlVKlFzzHyp^dDETKtr659gKbDz+Higgv|$<#FE@ETLW!P2EdoD^ObSaM zh3|BMd?wnU{GZAPp_KqMQH50*0#D;1S!ONvrF`eYolvsMbL=mR>81NF|4h}-vw0{| z0Zx)d2^>-Ez1|KnUjQNklXS2IXkIX6l@l`Zwfy`efw#g%9(QqTJIXQqdc&@3XLnaB zRwU(db*kAJ&9^g8P%F_`Ra?O0aaC}#olO4xF!Vo3V?proI)K0w6yiJ|I;h2Am(}>&;MAOt`f|Sf7!26e9 zRK(25mNs$;qf|&CGC1;GiGr&wmUY8>V4nw; zzfgQY5Dbvv>{Sf&D8Tu;axTRDwu#RGaW$C9fGj6}SYbZMy4tmAMz+vFa&0g2L>T=8 zkC-NVio@r|izLAJC&9`poZxoHirbv=<@!VO?1WhduP z8kdT`WMdqF;tGK7pK4l(0C@gh!nkCi534~c6stip6maCc`M3>9LiRi7*xjjqxNTe^ zDTJMOq!5*axY3`&10Q>7;*0|WYc4dw`Zzz4Hq;e&PtU36WlhH715Sj06P`Kp{E9{| z_{%5hEwqnr3>te&N(SuJMNOr7U~)FUJyPb}chwq_P*$ zMYWPHsa+awN7iVN@M0hc325(Kf4`P&Zhl7WkU^Aq$@cAzuHqWiM`l`9Z=%Jpm+#*7 zp}mU?&3kF7vKG?-S0L>9cj*-8`UZaffA?np3n~OXG8RV}6mN}FLpmkn=}TD`qk=_~|l(mp19s8!amg;SoN(Ak%1fe*&0l=i_7aa zXQT_d2@rSOcpe2@uP3Mss=m0|6C8H^7@1_Y4$p-WP3C&%d$IDH1lmvf5-ojuHlWMH znPz#TdoogIW+~5Wk2t^oU~EsX&N&gz&+uMo5=9gS^Xgghg{`{m?7+{~I7Fhk_aeMP z_4c^JX!zY&hT6`sbb7fKzU>zcfk3`3*7Uzx@6}+62qV*{#S-t1u_Fy51O7ve2QWbu zhtj@XK{{VdmQU-x@Vsg_QpMIz_D-fO zdugB=WCwbBvyTa3B!+z)Xu#WoXp{e_50M{^n`II1ja4DOdC)Z)#P|Qsw@jljbsG~Vcni>e~o&2Ye`J$?&xw5 zEgV^?tf zsl6;47!bos<#s96iy6j~PM}qs^^KDSJQDPo{{K`-1)-JzhvRXx5mv}ETg|cblMAi( zyyRH#1+)Ns#(%5jJsAX`KcgzF*1BJs|KI=CcsK(O_*=)_F>|_PD3(_kNMULTKw(0s zVox6<@uc=YhRPK2=V=gH&*F(kM@K7s9v{B+wg70G1hAJO+1lp%XmRuIxRw3pYTjn^ za#}A@xj?ROrOoeN7rI;_5BPI)+Kw^n`QW!C8;7;a&a*8R)1`%&%D)uBr_YA|w%z|S zhOz}yfF~xC%p?lm7!%wA(Cd>q9r6K=f7b6>Rm4^Rh?F)rFUrl!qZ+L-8z(1w97^LP ziR3v89^MZP2~pY?PFx$a#K{D47cX`gknq z_9w~yZHr|SB4T1iOe&D@@UZZ3)nt1pCc5aM{zHol==>-%+1_0pnuoQxoo`7B3i=}9 zajAICaB(ANzj^V4=f7}T^`my1YSiuCIC%&GiT^LgR`kxrBy2G&S&$nc&`FH>%580vqD>_9#l!usbbJBql 
zf6y7UW8D}*z+WmAO5hFFPuxCr-RO-TtF};2w0j^_K23Roc8!~5Qvinjdk^cGpZfME zD}q2BP1_$(UTQRuPnAHDs1#606|j~Uj73jYY0yfEFnsHGd6qQ-^OHQl#-H7)fx|CUwW5E0FkP2 zJ8Cg0J)JI=LOx-b|53ZEYtcCzi&>1$;p#v$-fTw_Sx7*Ll=!L_GGA+x=?NgpF`1b* zL+5~T6(LFO+f$}`A68FKEW19sHbc2Cg|!aba+fJzzkW4eq>#^uBN7h$j%L^^$1}A* zTWyg@&5Y0Q-Q+$9ERvwqoCP8i>pg(cNx=}0JFNWj-QyL6WXU8$eZ?OeHSE(!2t1f= z-Y5(e-dHcTJ8QJS+dLmoJR6D^sdwbmiO|L-OPH@dXXP475B5QW~|PP zq&xDC^gqs#3gSrytls&i?OaoNAn$y4z1F}}ap{UdRf*ePT@k<9evStdoGzBb= zsmr6;${x`aPG7s%{I@$K)h$5RL(8aJFpSM~UDkoB=OW}=037+leHnBAr^EKpmSfXaaal_d7u)0QG{D_!xe=3;*$qHfL zrZfo$*PX@6X7ZBd=kImTb@@M zA|n3%8yy$G(hM+5AVELq?qZh&I7vQr^JR@!l!+<%Uk5z^2CVi22g@1Qkf1UiZ5O1Z z>2fR`>AO>WB1~12fvgFe9Aqi%p~;wwXYkQv2seH(XsAt1VG0TtFn6HL2aC}MWA*&! zfg~^o#=h9yB3z+F=Ok9hWVtxH;q)C$N>(VP_JJ2ZRM@%Gtk>7r8+v$@F`*KlRMRZR z!!1;`M#|_+DH|{o7azKQS)hOhmvT~QBv@4s8yQhJKW4L<%F_1SNAXBuVqdUcF>e+?n#HMGbV;SzgXWvS186{IXEhBTNa!(a1{6k zCvHQy2B-U`(P=+H9>AMPDRtEuLd%~Ky*NB4vYtWiTqamGIRu;#A5V~yk;^>5Y~Ydm z%)|fNZ%jAt-ur6SWX}zRpRF>N!YTm`CHhZsu$;7Igp5o(etW4Q#sGo4w>kKP!bm_Z z#_?!EW|S(s&>h)7h&3V3pPY%bvYs{_mAY}eHUVdTZh#$wm?z{HL#eYW(}{*Yie^16 zKt>`LYUgI#=KCiIV!?J-+yb;3 zLiDjb4bQXtYM7OZN-}&u_%eGV@qf9$w+K=;jQ9kkxc&>%+r*%$UBv@&qGRDX#(Qn4JfZ+_{jr8?&o-cZvCpcTnDT`l>(gpE^4`KbSw`PP>$bW{b(eGziPTuf3-rrwWuo0X`6y|?50 z0y&{T()IfFYkDl}C`VI1{o%TEQb-l5NF~C)Jtod@I?&5U&gR~kYXf`sqINC^cz=vDs;!h8_WV0n3d@t9Np%po%JRf`AwR&Q=by<>{8P^!J(+_;rG^1McEF>d zH?<(AIRSQ=YeaI$1|c!eP#C0OjxHH&2?U2_n|~rVP3wst5&~!(*)fm~WiiUFw!3ck8tcWP?tNi4(u6K&120Sz`N+!aewwoHhXUJ%AlPgidZuUU zrP^ktDE4p9ySlolID^j2KyTgAy`!8dg~iU%4s4q}t`oL1eRNqDM+~vD1!1R_fq9Jh zNuV72I0eHzfs;lBeX^RK%DqUH77!rdvguo|-S@PYjrYEGQa z#pV?43^2(ub}*N6C0;n2lH$#=$6e^s<$m@|I+(+x!$9foYfN0*sMW;|^0cMZF6En) z`}%A2K2_=%+>JI?!gnp{-3tGwFv-gtxR5lZ^$1_({OgiO%P4@Lx3^azyM!eh#jwT5 z#3b?4IPx11_NytNe;8`CnH7eSSMRRNQv8L~zplejCTOL-&YLo8QkDkj#phP6d>mHS z^U5Q|XF5N@G`y>yXA5Qq-D=hzr@JgECI?;Fp^w62#=JQfkP?y;g3*%pRJ;w{MRi5B zMW>LvpoS>4gjx!gDBHYbdKq1Bf>->u9i_bEVmrfQC~p+Df-oAZFi{-I`DkIcCO7q8 zcS`bEit#t}vSvCJVMdaXsBpEnn6dAu3tzSy^__750nGH7J-O!BU-G4$ zuJ3&HGN{meaSCcw07v^Xd%&+={_pI%=X1oSM^MKEr(<6d(CSqUb zKA6dpQdxxOgClg@!eh5i!`_xPY1zGejow=&OoDKaxqhZe!2P`bHDi9y$@m~`iP8`} zK>ONRmHuK(o`J1HXsq^jyR=YB(lEq{M0#}GY2RPDwgUigGHQ0a?iV*e{fb$R0|PzjBujNi8hHkW%NB)hy*> zn|p34--6d}NZqdO)27cp2mgwtnTzvq`* zOctQAgtD4XNPK08^T|e}?M`xt(Et`X6@|T%rJ~NkC00d2;LkvU7Fa-7Ho8Gg#o8T9 zTTeYN9po$#>2ZnDPqgj@yR_Op9~OK;zGnk$3in%2JpNx#ZW3BIBc|ck^1F%Zwpm+EmZJI_5==nAj6l`$h3)Xm$6QO|cula=4l21$xEMo5 zLYZExuZSoPJ7IM6-Z4SThHMzq&J}r97M&Z@#^2?)CYhi^L=zd7=pbjQoAuE1G4e?E z0NsaO8nj7fwcG7HX^^M*?mHZ!#lUUpN7;GM%;cx8ibpXXmrq8Im$=D#t7pTKFPx>V zXkPa83?z}6F?oGTpygr|`GCtP@>xIP_Z+*#6v6K(oq$4n1Dhu8x%X5I2cdALP+ReC z{iEwP%mFX49OSg|DLI5_XF8l-Sn<&AGe5Z86B^j4dUp2<)5*;mz*ESQk;^2l)P&Lj z3y8sk$+ZFqCkh z$6kibW@jCA!gXt}wyKV;%kBSvlwH2Kvt z&G#Z`*X7mH>cmsdtBLmeZsKj+Ce)k_*2A?9_>ITZ%b(p10%qfHhg7OTY^amj7?X`n zy;@sJH~~=%Z7;)ZoACrn4JM$noCVT<`52qWhx-cCQDQ6>Q-yNta(@-J)u1q53BI_W z>rlwcXmakdYinSTfR;wBjFrG;9SO5!_|G7Q=mBPp-p6c84;3o2v2D6^YT z-PEqN!wJ8Nh$1UMdh6=>lNtrTwI~vaIyliRmb)JUZ#6OaR%FG(gWGiwt|SBt9pcRl zWY|DCpf;qjchDjn`iW{5HoG0W=uDSKPdEGrC1knFXz1vw=g2HO zzZ|8=l;ZuDSF$=lZEJa3>py?ma(W#Zl;60M7xV(4o(h!>&}-co8AV6K$W(Atpy45f z|GEfLu+F%_{g-|bK&kY6r zE1j#%0;Lu2jvOQK0*LInx-PC+1!MtE)7`xec*@GAT16&YiasnnkU-fMQ-dBl*$Wf~ zGrD0bEAi8we=g^DfVQE6SumBdSTM`jnCFf!6a~P(O7!hE3;vSyWOcw0OsvJU4aZQU zWfwIkfQhBGda8b)*UZllR!%J*UW&@=u3}v89%v)!F&f(_#7Bv1lkoQT9Gynx`81 z5U%n8r{1rI+s(`9mX{uqTmFhMjn>WXc#RMo3%Isb*htLzN`Z=VcZ~U=&)<9=Mf!S! zV_Uoo^W-y2W7r#l!jeo~<`&6>Nn9Zyv>%A#}X1VjVVVyB; zsr^SCRna*;lzp{^*Sf4fnAnb1`l9{@W3xGXAm|-X*g`keAy*dvQMe;)TwO);2SY_! 
zlYgepgXrShFFNBMMF*{E!W5ybk0Y}_rVol(6^|c35N;6k`;5No)Vh-X&#f+RGDwb{ z5=l4YE1_#S>2|(E1N&@yu=UG zX;!7Zv*~oGRlU+1j=mA3fs{lN86U3MJgoBc;oy+%7X&InPBox#c^C;*tj1c}Uk(G= zii5J{7#~KJx|is0^{sZ2rRg=jQT9FQnvA9obL>i|?<95{eJD@t-C&@1>l#o^rn!J0Dg zq}BBi(-`Uovls{M!wCIx)z3OH19k*>Y+bSpuk;?iB2vqlg3xH&hhxE??9DN|2qNR3 z{7Q^c7gM&?%X3KdyUCF_ulo-&ird}rWvvvFFJnLp`S1%wp<1~77k^~dop$ziFWVYu)=oBl z1jx?-QNa-HQ$LVHb#*Rpaat8ugV)QUodGQIH6L1bIl36b=tqb?ZWh;V`_Hq(Vd}y;lTJU-SGc(2Em3dQ35h;WR zNZrytsu?|IEzHtCd)pJ1C;c+x9_SlahP2LD{{S@c*ZaZ>E5SSS|4{`PV<5`-a&v%r zwX5Rpqgp1{WAi1>*W=XhL&Wk3VbHgv|#i|RLXT%RyD{kMGZMvW3Vx2cUo6X|Bour9u4Y2 zGQnK@+RI~VbAeOr0m~cP!-eNMM?-`3+kF?Jp`oQT^vh9Hskg-rL$|Sh6I_JIM5s^( z1poqLJ@v+bT%BLQA!5}67Ce#G!KF*kFqz=o)B=oJ^RPY&ToLqVEdqkks}+o~XCBZ2 zYj)`+?rZ_NIQW58Vc{lHjq1ar27RM-2}tNu?dC?rekiGYwK>?wG;C9zT-Y@fPGa4Z zmA@*^kZ$yFl$X6KO8CDWfj|%%sV=9|I{1JSjKdfJCjcJ#jDfmb;+eDTabd+Hp>le~ zqdhY2CFdQ3Li;fNLq$(lA4J$~QtCg~{**Vt@D?My^}22JBXc84I7j|6k0Oina$85l zc86}IagyQUTAJVaV9WWexlU%rzRZ7_<*ksb=O@4-W_tF8#k9V4?*mJU((b3NTwD4d zgmIb1sWq|2P}+@ufw&L~&0>2nR6lGuCNR)DDeljE{Y)&9`u1P8x%j8fds)FiQ*Bnhg=@sC zO8~4SxG8u|Sn;T2ETcZ_gIebNJotb=PJ92LwF1pstqPo4ZSjrj;lshM-e%{Y+p2l> z4SoOH1bKy{WiTQ$4140zX?U|z4C7LrJdAf{XZcdbB1Yzf*4ENO&XB6#ux_j=%H(tR z9aPBoykS=9{2KNBPFCr*2cjY_-=|J}Pbk9Qb+TE<^cL2}A$eYkh;#SSN|PI3l*spoB;53yePQb)dD2oG z(Ak~#NR0P?dsehMiy_76^-D!?Jgz6c)-$oouy|kz6=>zC-C))SOe;NqTFbI#v)krt z?`q+g_u-G-8ubZG;Z+<*71p|VTez92d(^fguYcjItxiW1-A53Tb^dK#$knXX=BcJT zKi|evW1Cr0=Y`h(KC@#w(DQICOX4c;Dl+PZz2^J*Xz%v&$u59i{B^oDQ(Z^icC%TC zWi`>D$B9pOP9LURWjY@AM~a9ne{!kI2r~MR!$HmjIXF7{o^mePSUm&zm(GrG8N0)1CcWySsXl^eyx5xa<^!J40nwIsW(!b;d7LZd}D>tp1(8R8dB*tC&oN+mdCzZNKHj58NZBy{2fOeD0@7ukMgaVmV)xzI>l>3VRA{v{Yq0b@7x>^K{5M3;uIE#@>StHKpx& zh^Ji~108@$pa6to+uzO2fma?55h#&C z0BZgH`{^wuq@OQh&*2~y``wxiP|s+3+M5E4;i3L(k~QHWZiBc3*&apHBj=7a+FA!Y zE_uNLwgRS{jzYa^l}~f%9et=$Ie%4M{4FhUdxlES9TtP0%63`{%{15xA286%qtW(- zWpb;2r7;F7_Q3yAb%;@*;@;Xs8c7n=tCfnmj$q4M(2RjGjGI>9E!L9|afv@c z!xc0|10?3#{S*^RK9q~NNiW490TJmheW*2_MH)tfrS0DO)Ft<8G*k(7E(0DnAI$Ag z_etT;>54u`gAVmexP7GT3pQD4#$y%)%3U25KVbTkiKzTMh^0`kVKm&cVPE|Wdxy4! zfnD_3X9sOk=*{}a{Vruutj)%&M6kB;@EHyxp-S_Vdbg{0DDo!tZjd?5pUjD21X`tk zj8NrDi1KP-*@jDiDw3K$0^HP?Pf?6^6!OjoQ@UdLWarzTD;>|8FcqU6SMN^K7x2?g zJ?4|5rU%*ck}j2W++ZMH4G1jIsD>%$qa=i?%G@Y!17Bo@(FqwlTS(^aZG#sl|kfVYjKo;7~F(@a`(J z>9`%nHl$ro@*c`!!QbrBy)0)!XgzA4dvA2D!+=fyYCB;J89keMtg`l4Z9B9xY0Eqb zjg{?m6Q~Z_*XN;%1jbN${&6jnp=AA(l$ls5@RQAd3>2<#(D1t&ZKwde#d!(vm7Qu9G@=ZALCMU zR;ZYvU{`Hln`6R__v|5;w@jrpl_w>U{~ROR_@X4PJ6;Mc?HP)rf2n z)w~I(b98vyTTw&|xhmQF4qE;_fDwi%4A^=oX=4z-&9cSWnPLLiyrGBq#vni)65@EiLrjO@-zwfo=UY zZi;(uM<(&-cW-XOy%s+?L}cW^z#h7Kz4LCv5|7h+9SZK?@L5!vhbFM^$sb%FjBy9B zd8hC@-&M3Uv5vW#$x~F4VuZ64X-Qgt>)lsXk-FQ|+-5xQH_Y^}niQI9u-cg_=9kyw zLQscUb=j2x0zC=+d)7ILbWW1b+f~*t-2tJ$eCf{0&h#sE&2?7*>1j0W!AFtX5iYir zkr6+!Om7)XMm5I)77;6I)sb5xB0|H#D0tX@jm_Zr_1dnNaCgMWakWjbV%;D^yCIE! zSobK;yLntwhSYW?(PZH>Vd$5;&zoTHyScQNn@Mdv(2#H2qZry62L#uRa~rifaHyoE zrVPYO9T}Jux9Mgad0|>b}$Xl?&+dES1pp{ESHLGPd zxKDJXG!EMUqpDO_BDN3KBzf|cZ_AQx|Jh5w%K2WKGH12PnP6qGSu`MTI!Z) zSIR|X@=>I7Vc6U)0&`)r>aH4WmUG0PTn-X=?-27OW3zF#OP_D}9Tdf!(9 zE)G%0;JK!Y@^)um4R4|SgSL&^p_EJIu7K#-(x$8U64$)y5|2*jK1KFMd`G&h^&vca zeff`OZ1w_{6=MDDo^Q( zmAUrw@Jo3(-IVagmhxOl-q(}NkC@#d(^;$Trn;7-jCH^Bb^})Np9?R?TfFer6;4=A zQV->NB;UMr;AE%O9$$>}Zew8sRsa$S(n6xjVf%oE`=bo>!hvrRf$2evR0d%7yab>V zc{W*XY^=$wfxb&R&FLG)!)`voqHxTWY-{f;Kx3#~F1iFA15f zEo~SY!_lw1cYlB+j2$E;Es1#>#3fvVx~<_YmU{z`53!|!uJ}h;yf{n7c?J4no+u9}be^VWkg@)&pmJd(_#Zw~Hh^!&F+1Pl$y>1o(GG zyfHLZ^}iPfStP9;Z=tL)Jaz^cu_G?2A$# z&a>_qsXioipf34g&30ZuR%nTiL|@j1NuKvyXNcV9pa8R#Nyna?Jc%@~n)xf7M@1*b zuPGcD?cgq=b40u##LFko;a4X? 
znV&1(g}*TRn*;TX6w@8#dQ^{{D2u_l%8=%mxD8;%9||?zN&UCZnx-pBZRSPu)?{elW8%hj3BC~9uoJ1_- zZC}WNujHPwKIFULo)7nKVLA0(nVwc|3FQv$u;rol!MSJi&&u0_Amrf9>qgv`WfJwR z@8z^kkYTHx+!ltZVv|2bsM2tv&3X5(v^R_s`l5 zt6BPWYajC?pB#D~lKSzU#5+5zo~5_rV<6qEN^nQLd6V7IniFc7UsH4fu8r1`2bv2llx?JR|sBHASmPvcpq)EC~(Tz+s?lAn5gko|Z zX|ZB$V-+9IU2caCvVWvdcRrdelXA|b|zefZk6;@@3_fpoIy@Y&-r8`Vc$I^(ow|67q49;kVW$_)LDPrseCu&0tm z{9+`*S%sLRQy;i?jI8K&=!n5b&@5$cF=!u+K6%T0K64*nWua8{gkpB}!B z8!u2YnvZU&$o|0{r3nwpMkZz@GaaZuZ$;vWuJ|wRVf@=I99LiNJ3_%%lf4us~Q|;USfq% z7gN;jq@c+*N{Lt)M8tFAhZ}hHtG>QUIKcG6mdN>_BS@o-)t9?oBPC0eI@>p>#ahs0 zwqk)oQ7&Y<4VCJ_lH`iB;1tGB2R!JpAD(TD-2t08a3hv!Xp0BpJwm}HD7!lU>rJw; za9qg2KO*`+PH)>dp6N7M1~3NdjS9L=rMWR#ZP8w08nrBXT&N6F6yz3>69VJo>(GRX zMzm%r^p3m#9JaA{*Y9h^u`9u>Gl!1xAs%JrH3}0wpM%kD1T7X{YXJx;G$zSE!KHi7 z0?oD!7C_gajc}&V^Yp6pfjAOMTw-aBL0(NQrMtW|K-T}xTa`KDsFNUK`8^pFP1f#Q zwz)_3X+kdYoEF?%6v=7I#qXxHyvZ+nClLAlhqX2?m`IZx*O_>{mv4>=5uyu z2d}AkJXN5a<@m?FKzx8*td!T(d$m8xoQz@GH1ej%l2-y764LCV_{pt*=UbzxS;=qR z%l!UH=z@jsG?a#!9FZ^;7<5xP$oPZWmZfi7Eab=>!Ec3;h1Jmbw zsXm=32K4C*r*~gm>M_JtlOws-M;NTcCpMKJ=s-Cpt{p#K#HCDPNCC6 zf+d}pk~t5wOM$t7QmI}b2`PFnPWBMyRr#22Mc3UW)q6F!9Qs21AYb%jEAuA9J-~#4w_k;Y?(w11@^?)ylN`@SxI6FNL4<$v!!g+_ z&9ipWVuW>e$a_m1zmHFZs?Hu4U z89DH4y0Ykf$iyU;9!5&s+!p_ip4YO{8St&87pZEcLpC$PqnFEpA}6ZtRr#pND6wQF zi*7u9%vdz5VZ~lWvcnFj+%?jBQ`kv4gVCnqf?!%l=b&)_^OtPc5x%w#_XEt{T{>@L zqO?Oq&cM9NE)+E=632}XJ4&UV72EFq{MHARvL%Ecp*4hGZmmJ_@f-Z}HEB*L$TGj7zfck9o>5x+g171`FhIgh_C6G>uQAZs5_?YSG5Mm8VS#}pvt{>6%jWl> zItVhDs&sOTgYWb0H?R8;m`Fb;S0=rvVym54>sDv3xN$9^3v8`HF?GL}akY2()5|by zpSt-T{>HD#+7Q5Nw%FF(sWe)rqi;wiJW_7P5G%Z8ukCqj!#p2kdn%T|M^?X&u>%X* zT<#ZAro{Z(;GW8&U_2XQDg`hJ8c}1Kh=FRu2%T(A2EIeLRtj3`gw$Q6( zQkE;-5JPY*fuOZq#p+|Fft^kzKCB+Sn$_ z_nv0t?zX#7bWqTi*Lv^>$7{O*<7_z5Qj`~&@T6c=!HsFJetN=#%iXGlR%5YIdzeZy zwU#sC@`ZN6nnw~djgP!+N7-uP&~NM0dDJ47ol5zTHyToeH9&%(wcTTDx!#m-}5B-ttb z1N-E$HAwL{1>FN_>xM>KS)(!B{GVdN2qKkDh7-e_Qpt`tHUVV5CE;NYs+R_OI`M=I z)D_^_$`%=$R=(*8sAtMvW`zF(6A^!4(7hVc0lUP@|Q7Cld z7C?^F0|+vTiz3;zmb?X_pWs?9nk1as_DYLWTR7-KVQ^X!EOtU$2sU)r-u(S0Mi^myL)>-?mp-gQ?cPk&viM1KtXCTC#=2E#+u5m;XyIyeF&4^BI`Ze zR75NAJ!4BL4k|%Vc^TtAkDx<+c~emKDjeK#T6{c0&tzf|Sh5&RJ0Edz@0;Ug)EGcQ z5mwGFe@(k4W6vme6za12D^LQX)cQ^ACy#d73#Z_Ftxmj6P1d&(o>tk4x=JV=DJu=| z{9$WNd1dL;QhTa#CzZ&pYmo~;`=xBcemE5@eSmXF{P64h<}6z3f+!a@6CH6&X#Xv9 zB#D@@&NX9BKd?BziMIq@73u1L&ypl~H_^IZQhvUmZ|4e>9KorPeb)hTtiF$3eh|qY zI?Un=4MFv8C}H=wJ2JSlch|)@4>JDa$8iE>k8&UIFjUnDv-*{h)YziIV;ORM zM*EyP3*8jeYC4Lec#}HY6BXt^^$(Nw3k${EbT1U)R4j2rrK}vU^h(Sj04oY)J9mw^ zKr4XULn@y70+no|H^z4FRa3huCi5_+q5Vu#1`|lcza)9KRLK!?sSY-JcX2V3meUY6 z4V~G-{Jn;BejcwZ0vt{~V@^I{qWCeK(~G9o1@TAIoKDAD3Hh znpf^X6VohVJ)!C=jMTa9;8y}(JKm7}YyqiefrwAqSpJ!1hBS%SP8qcpQJpP1iy^Hi zLx-f)Ig!$Z4E>%tN@xS3;N3CDuvU}dqkYpX@ivgM6qi*ho~MiBrLgGZ$=7K;NPAwk z{gR)B(a1&1dvb`QCc~9ZftNOW+I}n!w4D_|U}w-as;|qCz2J-ZQ9F3H9I>;A{B633 zfmS4CrcZgdhqw$Xjf>sn8@3mm4;d{d=LLq=g#E0Puc&KLf#H}LSRfgXu{t2>y226u ztiLQ2_`Uj^;7WfHu5%9FwJRXHVq-$V=_QYsk&9++E+slSCierdB7G#yaO2D|a*(rV z9QYC(U=UDFge#aIgtgjJ5E4ame@D@9XuK!At+ZIE;umJ}?DUmm-}n67Y3(E1CYV%( z&TsIdcvG=TKEKAR!x`%Ch^k%efv=DDU6AC}Te47%argb;-O|sy4Uki|%fE@`@UQ=7R zGtNL?Z;wrvpmiD8ux$t*LDEL}@IsSM)5t$7$bwF<)URJ^PqP?@x^aSLkk71a=tTX< zsRpSP?O>ab?xDK`A0*YvDc&u0OeCp0;Q5jc;;62JG-)_^wnx((qC zYF?6djI>tC&ugxegEF0pP=O8x*N~reTcaKwwhODx{)bCGYAvuE_5LAH+!#Nj$gL`P z;)1{9{(1fN+TCu?(HG(3*~1dO+M=Y0o)impsK^9H!_?_@6YC7z1lXemxh4#e--mna zOe{g@m&JM{H(*)?w>X;u85NHue)Rk6Z(3en$^9A_!ah6JXkmO!jay>$H&I#hN`{oo!(U}$U?R|`fAjQFl9h&i+rEnTP%bC+8t zT($zMO;L}b`E?_MVUXAOh`=bM^{*eogG|dqeUkJ>Exkn^>)lab5L#nt=D%Oc;*2i+ 
zJ_>b+T`c%2b!cr&1bnIT)}^<>>COiuwYgL-CdDlsOcSPcutZ7=Co>0bAh!xpL$ig2os>l?k(i*Kf!;UW3e<^b8;vGwpt*Tb;VK#WG-_dti^Pos z@HbTN-B651y8YLfSc;}Sm+ej z1SV3(LU1teTEZ>yd#x5QXB(ZhUK6%h%&xB)9h@yOIUEaU+I`DR<4Rtme9V}Cue+B| z0IY5qOUyb~=4IRdbzRpG`6n2fo(4G>e(M5yUMs1YD(uMxL8_y|#h>||)O}YAsfE>z zk+fy&w~N91oP=`u2bf1nTl2gmYDRavV3=$k=S)S>Z?6_I^1O|%HQSgPihnv`exuDf z_1C%GJ$=#2(y3>bnn1SM{Vx6_1Cxqwjx{zvOI9YupNGn7o|j4wtIT09uUeGRV>N-c znPWCjQ%V$(UuTMY4mCgGT48ZsV(a_=$JblOMV-Fi!!`nv1|3Q&B_Ps_h;)N=i7+57 z-NT}Ubc3`?Nr`}TDKSHeG)Osg*AN5Dz;lnTyYA=reLa6qzVEp5T<7Ro4QAs=Y=B{I zE|~%KCq;xIZWW-O{3Q56Yml{B6@Y!QXKWeDz*=7atCmb|`eqEDiwcR}$DAlyi44{% z9_DLtehze-oQk=wTXoC9TIlOGUmg1@gz@ap6i+X&fPNpavSx20uQyO|x>ylkliLQh z8%O;NK2?&67cy4T*QumOT(wJ+GA6^EdVBe%i}3StHbiA0Q<70+`su_I^Uo2!L#JyJ z%8LQ)D53j@JH|-_Z*TH@$IDyse9*hA%@AOsBx6Z2mM7d$+2G|6H_h28w0@t{BqB-Z zsa0jS=zVVXfXu2g<8L2~%<>JYxPN6{EYiN)4^a={p^fd^7rj}Dd0Z4ncy*HOE=c`a zfq6%%C>mN9qHu=S`4_0{!_xqQtbN;PC%%Gkdssl?(b1f1;@g3z&;u}od$)a{_t+Y zu-B8^OH3uPzMCRef$$DiIH6MP288_SA-4Aex4IhxAMs5_%Qd>&Uq3V&x-&?jKDnJR6@?;anp`z)(ezp^#Q zE&p4uZqnYuHF{@^N|l4j3HM*WG9JshHcQ{ekN6j=qx;R0JkC>?sPOgEUB7*MFVJ3I zbU{1sY?3Eam(Wo^`usG0JnH6guh&b5$PP1~rf4kQ+l_n|254patyDQDi`PZE^jsrR z2&$cu@-WQtjJ1yEg1MmHCoxIa!g7+ME65QRmWdo;1WHsCcpJG?v5a5-M*9ugx${Eq zR$&IvYFb6o`mgd{x$f`VI-uLW0hb*%V;1eIXMnw~aBt!25ulBV^S_M}D5-JvTuY8k zK!8cE0=^%t4cjrjW4;ZS*lu?OmtHJ}mbO2n^?Wvv&T7G+Fp{BOkRL&hCnk|!5w_d4 zI@HR!zE>r3Q>?K#)|X#3Ja#x;Dy*Y%pDv|SXYtnH5nIEqm-_xht7D`i-CgFaW{82& zFk#xptcc6AhsZn}IWhJHUx*r7%V_7mgMeScAdud(F`h%e80hiPYNn7EUTNmnDl_AD zFsDL10eK$Lb$DT``(-{L9L8Y3B{q5C)-j2)fXF8Y_Ny|LNpS{D^8jf{=XZ1W?)~XX zvt;WuiOZkUZYR^HIzO+M0FZyg4^lr;(G1vyH(Q06W+0VK50{3Me1wEY7~bcLn%>;u zKn-QOT6LM3d>@*ko&Kun#=txcQV#B%wliQM#JVXHIMqYQtjC>*)9=xcn>NvxGb_tC zQHfrX10Y-Oeh0)_qV;$w-HEL`K7JcSiWjw{|q`S3sUqyW1RTeDQFRFoyQ_FD^eQa)E{$}rlpFFzr^>te&1}&J zkz>$o(HHU>BCg%*N;L>VBRKDyF~1I(eFpB=>(fy4F@WABdR}jNDiHILI3gR&k&ZPA zDNTuw)r?mAl^895_!ebJOv3wfK{k@Y%}8HCq&^-PX*mIFLLt^nqL%{YK{bna)-N$u z%gi2#-d)0;)pKUyijtOLEIZC^Tu+-!;=4PgzSA=SQIMA~r?Kv^Y&eNpPk3`7Rl-;r z5zt;_bUtC1rs8geuGJ=sA2)K;5e-BGW@}O8x;bQ=uqP6+n5XCs=<2R0o*Tov71sjp$_MqHZjCAY@+0q_ z4Z>VsKG269FPpNyjhP5(G{LUuz$CYSvLP27doWHhL$zTho#Mtze36pT*x*u>Mg+|R zcYr}XQ;poc7s)oqP= zITad;=gA|WoQskjF1Uq4W4Zm1f(NaS%6Z}Cmfq}rd;1;S3PGNAsv@=bHF6t_T)6t& zH7c*!(yi!zSS_3@kGt<2RFhs^xdeGvSW~>ejq%}Hrvua#vA!=C4%}=9w|5iYX-pB) zd{3TMzY?XUw=W;=zwvn3U_BsHu^#+{pW z6R?8)OE1sLFDva5kzsu1%Wr89XPo**8z@uV0W(A_C`)Hy2(7fI zyEjsICp`7MNm|u)DA~9j>-vm&#>Ipp zl`7wSI{YeAj&z+xoB4EoUoqImxwQ<71ROFUXN>KblI2td+AySm9)o7aEqQ~{df#Sn z#>N0s4nSj}+QRt3JnEHU8b6zT2Kd-O6Y)5WA0y#q!B&-rp8@*1>@o-v4;100J7G;b zH(+1bXBA%9%jHseE(^Qb4)O7HNG}9>#(UC^zn(BFA>r!yO7Fxv!H?>qIP@Uj9~AG_2FkS-r_Wv;U}mjx&S%L?f2^3xzz5y)fPqBrC@GvLYJT7#F$D_UtjgI>n_VxCw;m7{YeSv z!NqZjLq))d*W2q~rd3anJ@B5^qRGcM$63VnMdF;+GhUFNx;Mm^#vi-b+_G(^=!L#i= zz2iORx<@&I&e*sg&NseaLy*AQBxDVe8UGN~oklajTU z_vQ1zt-Og5>yY5uI6BzeyoEM~9YRqK$Id*B=yxd;UAnmE%2#MEkqW-MOnr6M?Jzo; z)N15I10mpVatGtBh8ZM8cJfF>7(b5NCZT8>&t7{L&!S)rGC}HnM}F4BcY=osp}=_( zzA4I_xVO+$PX)z%{=q!+{QRm4T(UJm20liUtVfgSX_J_XxQiRRb%#bW?eEdgBHG&* z{m}jnL&aBj#>(@{pl&on9hF*nr1JFiJWPV-iW{9b7knC=G&+?H4RWU6Xq9(wV`>kq z7jI!dI9L_VP1?vTOgS5jcMy8F%vZ~RB<($%(xyQ!G9MS|`L+*paB@B-@8w1pEiI#`2@>sw3a*1y z(P(z~e?VihH&XMSblw{)Z~Art#bs2gpbU5-!>`c`vjXl~HNaB#5(g^BaW8Rr&XUN9 zr(^SI{D2$Bd95J|zYO!VxK}9V+2^#kxo2+~zT%7sS0wnbCJg9LpdS%r(-b}kwdR7} zb@nmwy?*MB4TiVyYN$g2`hS9hOaMfjicA66T}N^(x#N5YUu+c6V?PxqUhj zFr$-H9BnT@sn@UQ;RpxELhByLBj>GN;yL|#((CaUv@zS>#zIwin8Fn$ZQ29*X5BIVFeixv9P1f5B%oM2WmERYJZZ0q4D0#m{IZW2h z?~LNR_ebf$vib1;mw`??s*98Nf{9Gi$-=B}U@-H|($p)RJ#l~#lmKGv z)4m8j2ma^RD7qGVB&lCJJlz?Ua(m4zDiT-0ocWjS~`yKWNQsW0-PLy{x 
za;tN7m!4?wwKQ}}m>(=}OV>qFYUCftd+RwQP{8&zjE&&ldn;NjXk!oL#d0x=&a{<>CSB zx?F?T-r(#dBI@=bONaM6Q-4gkUW5G}kNZX1h9joZej${*x54b)+Z%V=!EZ)m=5)8C zh@f|{LZJx^Ah4GvS;sTjAcKn_VuFs!CYfc&(L{OFaa1i-Xw3rJqT_7t9vScu*I^lD z$Gfk6-57U_efP8utNe}y9S=4kC$o^biVTG#|&Evnh+cDAoNpIrK-eY7KMNHQ%xGK3#-m-ufkpkI?( z;IEen+3P0W;>nbc>lwL!$N$rE&7_l$P462I24RlG8o8#NydcH>&&*W!Zz^{*=Hvy_ z9K6TpW8~=IIxlOkc+!87Z$VpqAa*TTv#?b{c&%N24;EBp#6?9lX}IB77{BG*xzW|x z>0}Ta=6H0n+H|5A($IumcL;4d#cl#>h|`lrzY|$w&-v>~<6(oKSdO zWeJ5nh;$~V`2gPB%z^r7Pl^%=E*ygmSpX7A#*Ips@RN7NLW3Cmq<`_U{jndBLK)ww zc`^+ZVma_?yVkGb^I0i9{#H-nojVQY&VXNDb98LuJH%l-s?>Kh8LEF8ETKk3e90Xb498G_y_W(V1}u`|Rk{loEY z14G`<6c`;-RC4;XOPkCC2BD%cS1AnVGA`wmJag(Bk#pOw;CFzIu>plIbB)JCS7G~3 z#(6Sy+R^F3;SG?J?I?u}Fi3y(^+_HCE1$5h&wpI>B<+EBqKRJ89?+yPX^4vDOifH= z-K<9uU!)4meEO5%kWR#_`PpYj-S4053Nv_K^9?P*>$=h$03%~Ezn^63ylfQfl2#@TC2+yELN&%>QJ+<>9>fzYi&k}{Gp*cWt0QNP1?zy|+*(Zv0lPs@j? zSpPag7P|$P<&=yfk(gDL*(E;Xg#8YGZ-TJeJ#KwN-+<}vWd1Pd+%j=}w9&AVMEaN| zLIjIFnAYjiAIU$3+h_%BwSWN!y&Dl!N4v{Xk}^ok{*R_oR(-d!TTNWdcM~i;fhkHO z`}u!2swI})3m3Rx5V3OG3_d=7!NgeY+8fE2kH>a`8DQ)Nm9{Ym zdhhUa#nkvsE5v~eJYR9Fg@CpZM;eBs|1zf))$y=2HdzujO=DI~W!*l1Oy$JW-97bk zf+K95WFP0eE7z!X!~-(vn|iqy8C$1d$LmjX>Q!cXMb;@?R-R)HBU zHJeX$XjDJvfI)AQTZXsnPdNAH?~N;Zm2zHK>WE{@0n}-+si_>9Km{b6S|IdXL^HS; zTQlL{0l>8E&x`SsMvHgp^5cDDtyekRtLrt-T=z&}UiIRblG08Qm+8HGL75waVV);j z!H{Z~m5L9lrzX_MU~&WVUA>oDy1MLu2~W*W%J_px(-brr(69kEfAj4j0Lo80neyK3 zD1&x&Bihk*KqDn<>v*NG>lV)1u`JpsD9DLf&cXT9GOuWGDlI}IZvo6 z_sGHx{2Mo3%LM0kAE}_U3dIu5+a(+Qgf|xka|G3zNuHc+OxAV9-_|srZuGq)O54yixdR#RBMO$I~lNsPn z1f(+#1<*ZbpxDy93A97BXZQ7zk|BN)fj@J=*^ua8&jhyn7vE;HkTy!!ZSDJ8YXS%t zwy4=*p*z`L(d*{c8NNl}BTuq)XNj?t1K+NkM}0@EmA@D%BZM>85M2AsKWV0wEe% z0WLW2>dBtTsn#xp>u5q8Uat$!Sk4pO!!ayAZYGg@D9ppm@MsFqN}3NB=@szv%+U_x z;Ri31Mn}H{6sLeR7e})!M)qsU8MkY{DOCYHjSW)-ZHTthUJl-<)E3TzPWxd;{BFk_ zj5M88=(Z4hqlm>5r#Jmhy)}>Yx;(e3wZ|;4!%iB1x{o)@h~cj_XI9~2*9^eqC16c@ z_>+|gM`gnI+vCTS7Tsv>&6p2fpcvLLY_Ih@J@6Bxan)_h__|bXaiEts%aB-+bUDjY z#!7UT{O8Ql)tkST;qSxs%fa5ph{f;e=P-yYy<5A{BZwpWPx?^rws!8@QPbC5sbsF< zK-ceesKo5FB%nD3IFc^^O=ZgRi~8=@Gb0ye6B*zmxa&31=xcDYlhpKj=j)x_H9rFy z_fgS}8EtKPQ>%fJRzWB$5`-4fg2$05eHdR*(B_s#?X_qd32pCx|t@uQ- zx-weI^nZT%Z`Me_6bMMMZg=(G!28_J5-tav0@$1Ro$Ab3%q<=j_ z2+8X==E4G=*twS^5?iCmBbtaQAbV%h+}x}*w)nX7pM&+k5ejZFJJ94zU~t@Sxy6w` z{#Dj?KnuF)?8dOr#^>efp6na%`+Fz-f0>xy@c{0!XKxXH+(NNpi)Kiu{6yW>qLYlh zI#^JfEhLb~MjpIGC|zj==-;K3_rEB2e;>7dNZ1n(t>ZDeN}VGduo>UB!s z4v{RF`ut_F|GrT_Kd6D$irzvAjQ3KoAN?QO`_B$hFykh=i^jb4F*O|X6}^i8|9_Q{ z47Se+NkowoG~EK?1-LmY`y`-*5Ch1L>qyC|SNOzn4Bx6q#P6g1b6I}#Mc>B29+S=2 z(;}kO(ARk_r_SLJuG$~;*bu6jaVqIdVLh66p($%)CeL0R0yUhRTqsd{;#K%8PUhwG zc(3=}7O+}>FEnnOev>;~gF$(YLHVv9_@PipC#weF8T&gCw9cj{p}KFVU2L@Jn9Qo2 z+Br;Dm*X2u+z<4ygx!}5ZvUPF%7$S9#OF5?)c>*ruYrov+^>p~{E7NeOeZ(y5(ynE z;SC-EhTy=!u4QKmaqe>SF@lbz5fkjBFrP+bhS)kEogNHHMt2+IaqIs+pocDV6=99Q zSgLI_92nOC(z$FP2(^>NfR*!a+NRAlrLRM0?J{Xeq9ao8%I7Mjno8IY7bP3k!CKZa z+xQ`6Hk)VjbxJb4(xOLuJnE;R84Y;E@`gU=0P#k^-&Xy<=b9t6TY{}G7y+abW2gnJ zU%%+3c}>yvMDOSC07eSsRNgM1zd6S@-A-I-1m*qAx+SobK&0wOZ*ffLGLmAG^Gd|x zKV<0|Zja@Wi^}_xey3Pihy>eq9{&kDRt`!5t2unBRZfJZWT|EEa80QK1int&D+4(F zE>z8{NV>3uGnqj4DB+S0+`i!->T`TPRWYm67dpy^;do3r>%kmZen^|03rvUuF_9QdZ73D zuTJDO*xBw4?;rkn6AHEO-?NW>vWl}EuT%y+#mjCIN-bhwo2yevO4>LlIEXD4Zk_i~204!wDQxs)YwNkya1m7wHE3e~ z{thtBEQ21(*F?-#`|QU~0V?<8r>ZX(eYln{di3-YGy$&TE}Yt--SE zC-&WaiLqF}%B{`^=n&!vk4}qwM@pt2++wPeTWpuxb9W7AnsNscAId->0wI*>lzD}a z@SqyPmzXg-`d!FG|3Dr`;{z`ZI5r3D+>gVbr_D*|b?Qdd@QhPBxnNrr4a7u|2Ve#KG0k+_F87~T*=Q1Pwl(AXE<5wic@{e@ZJ#CtF)!M zNE2wH#`jh487`9SmPSWjYDDgis3#b|Uk*V`#p5w$X)61ND8}HCLxI4Bg 
zkK{X3Y_ysN+fLLcIC7eD-aKEqKEae)X`eKCOaMjBhR59N?lk@++)oP3vMR;1bm`O@C6?~eA_7|Noz~{4u4hUiRN8lw z%c&6LaK3zR5?AF$pCRh&S@?0RVBOLGtUC}8Ihb&c%8EbkP=#qItw_$)T!pC&d?e0u zofSBL6Qibq`EFBkJ=F~`kqh+P!PL4e-vVU4aX58JiNm?834M6-Ay!vem7-Y`6W=#^ zA8c@e>?#3BCUx|r3ML4FQc;-L{-{~xcq!1T^jt``oCtknev_4zRiP`9YaSRQ_}Se2 zYd3Bblv#EU)HquTd+ucWfYt>r9qnjuj|L>AYMp@E$z&$*(DRsJe5owab9sGBLPS*ewkme={bss74&%ZQ#a>#bPf`Wl3C_vrxV_MW1w&UoYnt%BCf$wib24Z0G_Z)eZ^Q=#TuAT@j-BQ;gJ0uVZaelk#U3OCtPU5a z&Mho-=4Ha}0C9DviAZ3zy3&N5C+rtFO6qTc3(luuG~R(DmIXa)(E#1n%7^R^Rp4-V zC&;gdW`V3`oJM4Gi4v8V%WKY$=JLpyvw}uPjzApHl{AyjH^`H()g2$t8`Jq}(8m0! z?nsDV`v1~fuiP>`2aSM~7jeep(Rcu(J;kCQ; zmTw*`w4FrMuP9{|r5jwgho^w> z5;5$$@k^~`kVo=%zSROs2)|kp^`X=8k=GFj0-?L3jw@J)$x=n^yEdUC3dL5!G0nme zJGGp8_EqWckjVA$9KYl2OUH9S=cOAFo^@KZ~Ce>e>VfhTIsltlBBUG}dX8FshG0V3wlwGlHWQ@lyfY1(% zL-8fH&ow-sw~|+ppO?^kA+$r!_8jL4fJG9p8Ft~AR|`r>Pq$M@)+*G-sap&sU5q+= z0(X8Lg!mb;xvv(3S3U=raAf7uQu)f!w4s2~t{Svd8yUP<3Nyzl12K#J^>MaoA5;W+ zyb6FCcn*&=3;Ag0Kf6d31R8^`b?KuS$^4cn%{;emHK$$TC7^ni&QMxd&`h#(mG|Sz zgD{gbAd@**=<;fr26O|i1EHIJwGpY^gEp%j-w23*un&Eds45}F+wyQdBkL9fYjn?a z(R!hmKJ%)HDLfX2wV_afy;6;?w$5| zBd)$T|C||M{CpB#^@M9EkhbNu`{$H(yt^e;dl#**KqfC9tc{iP0iNuT$Vlz^YTXim za<_cMI$7Q;QJCUm4uy7PhTPXOpgtY37Mcg`^?w=45BK1p zbjOEE4$nJAS0&*;wY?E-6;a@`;@Y$PeYovPv7&J z4NZ?yIKB;{1n03$J?wTJClu$ixP6?RP(8lnlUeGK&ALYniZ1KxaZ@_jh_y4c9NV+( zX3Lp|{%Yy>JPVR925?URomoo6nWSC5#6|=eLb|Trl2(ybCpx*LJTJP#8$BvN)cUe? zmo2g$fp1>P{L`d*Z+*dQAP{lM-vcMi*kQHB=8^{5R95o9h}3M&17I?PXdIfbmB~g) zv9a&?Hg~Ii0Q>rn=0L(7P%ABf(|GWVhHR&p_+cx;>BaSXW|=YH+9-0`NE2OGhZ8=1 zx|i_weIUc|OS?yLF$JvH`wmlvrnrT?eEHhMUvI&jmX_n4ZD+wZT=ZEeYT|vl9(al1 z(ZdVV{4V;}P`UZ|ytcvOPj8=scyh+e@XYMY^?A@32JXxj65bUe!`EK@P4E&hsZJM_J@)Cu zIPs!rb;=xC-}8o>{S^q#oi}aYGeYP>^-1Xq&d2b+Q(y9P7fZjCULET*L#W~cZATW* zD9u{k9f>^h%McThy_M~Kp9EiO&RKikec}dxQrH7=ebq+f|_(S;9C z`U6zyy*J?md^y=&QVPKVK?SGl1K-e>9xn=cg_6uJol1n5R5Tb}xtoZwPqO6PF1!zy z2E>XCit)vcO`PJdc%tIw@jyK-(LEfEU69bDdfKY1{nr-iUZe>SiWak&1A453mSe%5 z;>Cm|THm*i6$onEbOTc>t~xM&_I?KX8ojH5-&$S=qiME3pW?zxe#@`8IFRh$EV9HL z+Mr+W*|{TSIYvZt-y^}7M$rB-%Bypuyvbg^?6`mc^f2ylkWNNy+bBCZmDQuZigG81 z{h2dr>7vDVze#Cm>%perJB51Xxz-Z?kCoU!!hsfbGw}pS6ONach%}TrzkQzROyDTU zPI`!&Hv9c2hUCyzoliNzcI+o!^SZ|TK@n51xg@*4H)Vd28G9~y*|KL1j3eNHz5o6YNNy8h!~MpquVER~o*w7B^bu=6 zYh}N4Nx-s+0#UM7E(1N2R4>DJuxIIj2T?T>eGyxrmm0W5tz+}O1lRZGmx_|y1z{H(HQnV{U3U|vxAr87pZ~ym8bG8WMaj(^3Pywz~ ztF`R7g6bbht^%k$aA3tkASTbrf0Z5oSwArF$q8_0O|h3Dq5oZzrZWUh{R)sLJ%a<) z9P_RzNlyW4^N-Ev`Ky^nEvKOySM&bo32!E-p&bDy@gIvzKG)o zz#{uZNgTab1@rU@|M?6kGm^qoSnA!z@}#Fg;gubj=MvCE64xly<~r<)f@Oc6_8=a( z8)bx1KO!S&irq$EjZ(x0?fGU&iX&VDf`Sx4iv8(X-$M;e%^cAB;T`}rRCeFv{e%xj zbvFzyo#tg{zlU!7n44$o%YyK`+jDiOkd2p@mz)9+H0>XV-?v{9W%2YY)~|?_@I8_P zLh;!kqk=U-Na^Xh#l83A;Sn^)*pn53XY6}40{bCsv@^jYwj&?Xj~K?k91yxm?Mf4IRp2(Rf1g<6dt8ar#aZF4cAO1LDFG2KF2fVp1l^1| zQ266ujT0+1Rd|ot(3i~=Zmh2`+2mnklLh@jI~?bGN~GPvBG!MXP^a)9(DjKCX#Pnw zHN84{X}*?ff|Ehm4tWcN(2nlsUnVV zX3~-~xu3{vR6nTQ9!8avsm2NL52?Qr$L4a|FelCu=KaVDA78(afyHCa<9o{E!Pl?CaP8jCuPw7cQLAHbbws=o z!im~OFr@BW&m1T=sOolo7#&H;`>^L2h=e7t+K-jx;4Gj8oayhoiK9U-0tX;`9L^`{3f0KPY0{mx7n_sCS@(c zH;02cu;8ACkb*lN*I4lJ3k(e00gHTGzuBttdLwC#=HTl{ztATwUZiD&)#JzUo$aXI zlaE&(TpS zx@aJ7H|wuk*I;eSqyzfoSA5L0PqM<3M2PnGT%wqPM^T=HK##TNm!nmpS0Vq;WG(e8|HUb!? 
zx`0fa?i$vVPVDyYlgDxuwxQs8K7DNQ_CW9V_LsO~XUUbW7n7GucXwSpk%1K5i#Mwn z{V+i9JXcwB7V%b$vX_a;9yBc_w@=^*vsdqC5m8CrhT>2(Fn!7b6%XXs zfY}2hZ|o-fdsyUVzGhy5E2DP%dnwtOC#0~9smzjQ59RW?Ocxvbb z4fJw?P{ib-gPVzlMl48(Wc~Nhsq=_Z%zr&w=L584y{VzN$s;2}!!)HdA$90J(2yLy z2BXv@powxU1)^96Go=^>CX8CpD6q>~v6OPR1MJh{&HnUSl3Wl8po<_Adr0S|qAeCkzK3qanU4@gKu zVW7^(=YxKZSdMnZRX6;2^gqdgCuU`m30HICaFKL$EV#Ir z%x^UGWI{|rlx$nTmBYDYS3QcEa9f#fev58O3S_X|mt$^#C@l#dd6^{_$AX2U^- zQl`YD;WQ{FuyAlFVu1s;y)M{Fh{ zfyV*BjZ(ujPSR0cIsOWh<`xdw^6tv)g@=4I6|R?%T|7;kz*`i=LP;}@4>onml$t2O zV}2^ml8s7Kl{kEa4+jpp3nZa)z`zRTnbf%{Cm{FN#xz63fu4^X7v>|Ehh2Nm>(`W? z0KGV5cKLxh3ZUTR?YwdE0|p3f@hWM33{<$^LcbGA` zD(Nd4m4w^S^pkKBZsw4t#=FRoh&u&t$BwgNK z8@W?g)J!s1sH56;R?k6~hnbe%ro+N6#ov>6QsjO{-bmT`w|M@+SEPz)R$qK<*lKv* z_FI798UK$%RF?ZNU>Zc+OIqJd4MoINGF|H>h~V8Vr|kIH!(P7)S6^IX3}Sxp*0gP9 zsIce9TiP4x*1Q&(qDTsQ`VY+1Df-XOqWcwjItY2{f3Rb=8lxxksz}TzpL&W{rh^CW z0tcv^)SPT(@6A{cJqhXd3(`l6PcTHKHK02okpAa7IFUa`;xR^9557lg^}lW5?DS&r z_;EG?H02o-j6q0Olw$s-CZKBXM@3OtNj6BMZMd@sLA$OH4nN-M;u)fR@hleLo0f|j ze(H9vNPd=l{q|;%y@%;_7`tZBt7G(RlvWKFwaXKw&$w=iys!gH!<2E?)Ti@Q-Z-Ok zhlyUq<$bJAyG846d%K+H$>HLVmWLU zhmpNy9>x9P(~B3FX*x|xjZKoQ2H(frv+8g42`|&LjT@*R{hG*SkTOtO+?Z{iI9Gua zas(-}qo*y`?9lvsj|>xnf9^NPQUihO*j`Xf%)B(a`?J{{_OLz(*3%!4BLDYo50REJ zFp)x)EMi=iNN8laoq;UYKwz^Gds|cJ#!iI z24KUxR27?W&1%Da({%lP-nxIYVIbxL;)ZM!Mt*EjvabrL>-_PW8v5evG-!+$qRYD= zs=7&~AHx3gJ5JX?nCG)T(@T-|2taE7{fjsM%#8p2r5BITRz=BP)b0Q@iQ19+V^ssF z{z!xYRSPZ={`Yh2n9vRhw`G<;b|IGV|4D-XeLr|2;e@U4(99d2BogR16W~NT^@<iZXVk* zf+XpmiUN4>gtD{|<4&5TZVA<7*su(J_s$=ybPbks4F(8X#4SwpIwT@ol^`Jc`*)qd z;d5QAuslGX_TO9D%8LoE2zU)5?|;9(ndkF-eS38wXdfhZNe~%_{pZzqT;%wm$Z8B= zQa#NN^vW?piGS`Xlo7)?dG|vGo8$<<-%tT9!?o`fiohFZJsbV_Hf1^5rKMVT-XFvA8252s1^>f29Ln>Om$GE7N*>|nL!FrT}sDkga-^M~~U(gy@hDFQaN z*PTJ&5ra!Bqsq(6q0IsKtN=uNH!u^@Ujkr3Ipi=9SZOue0*z+wUO}5-r5zMX%ZL7mXs%m9B~6=3!gx#MA6jrZ5IXzw3a>jat_ylrZD!9w1dn6t!}2UUca$91s5Ur-QXWn=3C5QYuDN#lOP77ybruX z7l`Qc;5S>*ey)HiP z1|Xsi@KVB~*4M8*#M?Unu7p77k2nlizml!-I`@(h8AebOW)Dv1w~VX@g&Bnm$&>Zz zCXfpiAM_%e8?4*>K9Yd#{+(MKcJd{3@#~S)0t)i?<413WeFbDUj8akSmOO-& z1WjNj&DjfJka0nPi%-L3oqHE3UWGwHvdd1GyM1= zelK|Y03OA#c?)wFHU(Nx2DbH(x~+1HGhXB{JQDi&1P&dML{c_pX0QaUp*Upf@9zS{ z2xqUu^pC)iZ8(@R*f8?iPH{u#L9=HLo&vtIddd?wEs^pZ8eJ-iB_Gq&ML) z`lbQN>{omZ&&pHQEhMuqWeSLk_zF^gKqBJ!$;@=E5mw+Px)TL=! zE7d>$=RffE(Fvy-rb0;dMm`)7<&x8;9--D5{&UQXuMf`J_(DtF4%lA^#&T&&Wufx?TYXAt*# zoUlAq?I}*V2dP$U1h!Aj+|w??l1D!8%|#UE86)gy7^wiiKrHI?;G?Jrgd8V ziDnI-iryt=C)xddu7Uc98hWhDt6~>5SSt7}r+z+RQdZ8Bu!&n;ozHv2=i*7FDgvcT ze_`f_KP#Uq1pA5lDkx9(@VP9h$tx+vudUe%Br<0p&znCV7p6OweinW!hgiUDL) z;SAjkdU`Of{|sr_vop~Dv#^%rtDBeTdp!pCGz1W_5|SAe)CGO?I!O8&F*}Mw&)`-^ z0Gbo;k}BvxADBs@cCNu{fK}kaXD|?uOTF`B6PFZki@bD9z3ELzKyYWSqE86_G*>lq zLkOE}nV~*_2&bAII0Aaope{NvyZF*?>57RS&+a!g!F0%{PoD_JgGNVnYuwgTtla`n z)lW9J{v~S>teKYSwP} zdzkxXjb%ZB=Hj44Q-K%XumEmozNM;huXt;$7j{3KM)1S37clScqo1raTjtH+;rN?P zrzd9>!+3mw()?nT zRDFDMH6ikIfL+f?U){^H{T7CkZvc zw%-b)fQ^LHUmdm^otSUl+@`CmeT42wuX=zSQAds_9l1iYP3XsTSdXv4-fgW40oP`l zW|tCVR6Z@f)6RtaP`6Rwr&2D%REo~9-$*j9J=W!^nT!1ha41^|B|bS*XEGpuC&uG- zrzgi*pPt;Xw#b4lrn@(ToOGspL>eh*-X8tYX})u7p2x37!g)`4F>PS|RuULD91@zn zPZoRIYx!Lmcc{h%^#M8t+GuOM0@IV-0oCA4?3$%I=2}v^THQ7lK=rTShAQ=Mz~aL! 
zmX$I-7H3d5xODR(9(4q<&r^%2A^fWi>fG$&Jq!_#&Ilgk4hvPuB)Al9yblNVoO%RC zT1_vos%A_)H~PTx;jKmUCV%E(H*N=D+hN=Fq^3$6ZKZrun~RXs!`kj_&L~>pvT@^* zcKH?S$QYXdImiZ48V%3g{>}D6^(-0JW3Qa7J;A;R{8oCh&1L@i{p>iEP@d1W&qH6e z`(J~pLJnr8B*_IMNygp9Q6ov^6~;0gIwi%@r@ptu4*C%-v2}?RRH?p8^DXb*dm|Hc zT7r5ETA~$$6A79?cPC(;MC@AbG@nh0uS%V#%;@$Y5S5E+x>4o0pHWE_r&&h))Tyg- z-LyYGU%7X=ASP?($kwv$CC6;dTyx1-O`2M&fxmC*+VY|aL0IC1o}XUGRw^FhjkbBw zOfPU6A#WqL%uEirn;>GxGhN%1GLf~lJ*7mA=5o5_Ff&8H$slE9nSSkjmqQ9nablyZ zq>-%Gwf$Sy3ZiW@Dqr}exApj!qvJ5}dKSLhOgz*I7AjW&na(7kk#4%wp zix3AU%&SL7{Yj12nq==W1)xugtENJyv1kM&(hBaL23ZyZQ<=KM(Y%igZ@uZ?kM4fN z4@^N5!Di#Jy?9FeZm!bF92*3`>13~@1>{d78qDL3=3s0PdAOJvwYXhT;#;gN=zR;> zj$%%^O$e$4UPi5`&oUIg%*2byvTHjA%=9rv*>v+IV*%nMVp|WoX*xw#OD8Q5SkR0N z1TF7)ZW`XFk6wdD$VTvJUsl#=p2RytHOuZ40ggzf6c_}CLBAduJX^2(8k8*0Qoxqc z&i0^0>=B+Vy7)3&Wu73Wm5{7IbyL%mStP-EhBjaGu*WP){6?hcMs@${-l$nrge$Kn zypgoTHq4jQR90Nwr;iVG6#LojR~!Sh)i00>bRA?1Yed(kuZFIxO@;DA3rPSnN*(GU z_n!XQ2RjX$zC4YiolSK!p&U=eqnWu~@jbhQ)eqD%xB2{S;(N2LxG&cj%a=1?mt=XH zcASg?FpGhdhhdzHq7s_J`B5HJ%`kQoPN{kx-H(h5JtVNLms{9PKgbjh$}aZnpetuq z;}b*cPJR8an|fc5CUp8Mr5|M6>-Q?ziIQeTZoTGjS)`v z>r|Nii-cKeH1{!`=%*>(lLNg?lpD2+jo+H~m3-&5m}VAJExT<;H|BLo9c>*}ln1iV zip?b}hC=tjw0{ih7Tu@}In2d9*I=_|ajyiwbGfb;uxz;^gSYBE;arZ>f-0{mqgdn{ z&YtH9&(zyYbM7U)%BC8zeDB#7%U=D12gFS~1R@XnD`Su9oUQxv5>K`?C|k}b{}2Yi%cdZKRJT^U zA6Aw{=^JAH@JBTFt2_%t#KaTZZv`kOhUn_G%-R6H6!+yEF(&3{*?27IWh3m|!LZRT z6Ewtzd$T|6*}MnU!?Thyjf{e3Rj>71nMQb^iO$eUmlcWrY$Y_AvQqMs<7~Wfc~6y; z`MwW^Oty7O`kGam^sf}MGk#Q8U+Q<7qaC5}RDnHTt8e{*d1c+5g92&fzQ|XLJtLq< zV&BXIeujCGfro*8+)%R#-Ny5d6E_c0RQP@zoi@Cvf75}i&Je*l`~NZb)?ra??cew@ z0TD?hC5BW=8l(hd#(GvKcD-vWYs4xbdNx`W5?sDb0z}XrX1BAgJ)Vqp=-3=5qwGU z7f5C|>fb~7kpM>#=91qv^jb;e=_^pKOVa7bFL3lfpXSNZ?Z+2Df9v=JS2(6^%WT3=7MgphVKMeH{Kk4xj(FB=RcAIYpQ&HviTgn>`>)I8;<4dlXW zwm9Z;tW4i3HL9=g>ppeiICUN(!0Vn@=(O#`9G)ieGj4$;lRB8re>FB%r9n|*Wt!Tp z?~}L;c7m&8a^aGwGuTfVx?6OZ?>HNQ(aom_iwD_dY-b66&X4dVAKpOHK2zURTaeF* zkkjXR$Qkw-vKA#7r6@sOUoiNS1MxoOHa+TL^i8RgK~D}qN}5 z$00Y*ui>Fx0O`S=Y7qJS&egY7a+ZViy*F+}IZ+MZN?!EeU{{agpID44WU&kC(biR4jqnZ*Lj1bjoGa-bJSwJ-!ir3jji@^L zZPX@QNc%c>b2bN`3@~Q?tPWL>v7~>FX460&uB@S&rD&Y)=XQxQDV0k}I4!oFP$8U+AuB6zn5_;6ZIS?Z7g9k>`iYI&e zK{AsRuw_To9W)O?ea>!JsUH98y`&Dx9eQM#2qC86jt?T~ybRPT}dY zC$asfg7D>~+&|oIx*5{;$r~PbN?!2Xk{rrhflmJz-=YJ_){T%du*Ornu9dhM=K3Yl zn?{wd!v%8;>*}AnSy6Z{$eLiK@-p$#+wu)e=rBl8dw(&aIiOGY~yP?e2}whtIxRGgVso7)F3xnHuq&PieO@r<}p^)wi-Gu+iF}gL(UcJuLgByP7n{o ze}2b=IkVuFQ%qZAf)vk}1RGZ>G#i|)ES81)-nYm)xVaG8rhJvKu0}GLE~>;d;U@h{ zD_o{bkR{O=L^t?{#jLjVH+p>dHxqH_wT*kh#yMZJy-XOQd+kn*Cz(HMww=1iteE3X^ZmTtP}&q-@)UR{$npUz3EZrd(tEv2B{ZaB|GF_~d%`7MnZ7vx>3?Vl}^ z$G@$5h@l|l-pZ@(+vlD*7a{ga>S$z7tIAn7SnO$;QttWsJTF+&cC7Ok@@I2}qG)(a znXq+dFu5U3FeEP4j2$s4?_q>mmz&gy55|OWp`honA7mGjuiGh6x4n!`smifil3h!V zXwJsi$~CDraav@tVNgaVxm;C>7yN-dji9_@6Q$N9wrf;d{Lr5cP;x1zr|KtzTUD+d z$kYkQ86!EYUDq2d2}`xq8Kf;yOJL19S#)Hgot1Ne2TGstbm)QBoDJER)pBhxFC@#y ze)|&~?Q9lfMS}?Firr?x#0 zs+W{g-ivwa#=@YTUA^Cpb7(k`ADEXhWsy=TlAD7!$na8oUUk$hd2=RFD$Gj8kQ$WX zYIG9~8d`KpQz1*+aB(}(jLx^czOmVBXb)CcD9jn00?bodfmoJAO4;x3%!w8F8-Bp9S7q3ph)qg9jC`<^Io7F%rHq|A?08bNa#1E^f3C$UsE=J)6`|n6GG)!R#X3BHYLm8eN~9 ztQ-rWc-4;?O^9=rg;pTsut*U-7EFplgn#?j@GjA$JO$`BPrF$UBOlg2y(P&K`s4+% zeaJUc-YCoss#qt*ID20^{Y~%bVr6a;PQ>r}?LlEVqoNkV_n&O~P=d`S9FoHUMK%V_6U);~ve;{t8Vy%_7}(Ezp=HESx$SX=H*2wx`e7|Zx5nKZYI1^N zOEZ?i-E3k#kXuYJ?0=fi{zYscsj4<2NS-P$IDa4Tk zHgg^R;i23DkJJ@n3eA@2DrGa8AI_b%@1q(j_&4iUXl!YhQz>5`R86;G?poBpccI!-XG&QexKypP>f zjlaSlmuv0$h6$t5rt@x>aVpa3JbZfFp(WboJqItXp*L|{5M2vz^HBk}!Z1G<^;#wq z2a#;wp_0cI#+&(JnrlzzvlU_I1$ptWs7=y$b3bVS?vOd;^T({jKJG?^u6L{ot_7Mi 
z3-jRqD^0&~4)HrI3!TFL#c9TGi1^+!;`KtDTT+i0rtoUL)L?x`ASIcC zV-?niZfH_I7keLk6Ap)O{#F!Cxgi+vtiE3G-u+mgkyEo$I_mnZ4?lU4L&{m6oHG*s z$2wk1tg7LpCh2)xtdO0&%wH&?Z&l9EtQfPOAJ%nBF-+smCT0VjC6dQ z1HPtG`kH5VRoVuzZ||JXcBa$7T3j!Z#>{bI_G0F66aSEFpsG>0lS!~Ah;G(r4VG?s zmBs#N_)JcaapH+BVK~<9@L5PUCy7GI5fDOxyE}H-Z#9#(Yo+@JgjmmQ@n%)!2uCmx zFd=oR@`_ji>5RsjjFeKQ6_LiW`a~PrW@FPfq>$qTf0=znUgo}fcVD5>34?4*?yDV2 z9L7E|<(iMY3!$5gsA&6ZEs^uD4Q3TjuEd7AGWK-F_XN|;E}|WZk$md@zm6J$p&#i5 zHJFt77Lq$8pj`Vt-HsBpG}dzFHmP{Yt`|MV7Zut$p8lUF3=;w>s3ljzaXGuMsk^VG zW=F8D;9ARzAbaUM*^<*qR~{G^xiY-c_14K2`|wUGg6N7uW~`AN^{L9~5*4@H{0by7 z`HBLwxlJMWPYIO~@F_>LJ!0zkEKX<%XER>{evhla>>oTAG(-5OfmNz40_3pXXUddM z$JyMd-Dro{qZT>^ly!3*o;)VbNH)qwZX}}O=0Es78$g=|^QO_!4JIeNWxCx%fB%7e zLID4tg6E7a5oa8VEZs!s{q3AUk|n|`W}Llmz20)o=@Fh}sT4XwH^1(tqH(`ymQEg+ z{oe6AaVe{MAj}3M2f;|K2bee-WU+=eWbdT+%-SpUJA5l%9(wLR;`GIgkf9spJ~x3{OF54Ww+7Lk{IMm=f&%wWOau*i zV6309U*EP>+oBF5+e3h&D4`TTDQLA3vA zk8$;yM1Av<7=|ZQQAMH4%V$iyg3%7ks2|=;-t7H)TJ<05Zq_vsDRixoDTW}Wf6y1i zT!)GXhMOZJiLE=`*_P}`%VQyY`;@5ZJ4(I=Gpt^6y5>L35owPD4#A4GEIIpPQfT_m z()YGM!3ULFqBqs1HEQ$pypLlM?>76e~3J=50kFM7IU9Fg>$O3(CvP1|PvFr3xj zKm$#1%+^3Ys)L9Vj4PjD-1aNUbKT83HNUntY8vgtUB<{WFhl?g7@%UY$wWMX;yEHxJ!R40IE?mVu*D3D%smmhG zbIdH#qN+hyaOd5Z)rv`Y#Ons!_M_ZaLS(SMHe)_ArJ=MSV~;mVNmkTU-%g7g;H>7F zBz9n*3pm}k|Mn(lQ2H7dUON-Uil5|D49npYn@2EhTB*_DAY=7Nekwlp9j@6yYEG{) z8xM9U6TwJxQ~6`_vMVz_BsT$eC@$e)!hiN@6`0 zTcJ$3d{#p8Omn4GNR!lef`7BM_O4QBK+V;RNUgp5!27o9jbz7X4ewS4=ZAeYA9A{CdpT|WTWkN=rxkQNt@+hu(z>cpky#k zX;)+II7^N72g@yLP^f#fqN2xn>BK$K#&4r_gPUE3Ux3ILZGG4WC(7J&>2VYAJy1sf zG!yHq?0?)-MNCYre)0eVMd_U!x|6{mfBkMr5(m#qwE=F!2I*7qqRSsBeF02?%O7Ac zWx=tpyZn*Cu>X+t_j8xE`ujP^f8N0U!~cg8a2|vbP~5v%YyTa4Ge#nblz4+_L-ew~ zVF!QwQ%*&h`bpTP{K7He?_I)wz4iR|e^os)iR|6VMNrb^??0ae)yv)`{pZGiRbOn& z{(11&Qs#1`z%|FBAynu-!YY zeDVt@fGxpR-hwa_Y#yL+*k9L|i6c#H<^E$5URLQ}Bb~kb{6BAFFTuS1fB7_M=l_Ai zJYKtusLQ{*!+fwd%u3_^^Zf_xR|!|({th%X2#d5zZWU>lA^h2~ZMxO$uH`)-`Pu&6p#?|)2w!dCg-n9L`jn-yOvvp5f6Fw5 zkEE)?4C$^zit3(m-0a$=k>|@oP-YF0=8Llp6TmRAN0Lo5q9f-SIfd+}$lA$FNdpeT z)O@r1$^}HZ&BuU5p(z>dYRAz@Zy(^Nne0W8T35NO_CIppoCwi~Nh8}ppI12!CGPtI zWg0Wun7OEK*&J8So|;l&k7Z|=ov(2c`8Aw7k?Nk-`U<6h?-2}CBlt?eyQ)4|yzWkT zus74y`N?*CjG3}iL4*Mny(GMv?(*2Lw69y2Yt6DR_3pU;`Bu#m8l>I{f$Rz%xzpSV zmC2>|Z!o@xquQ<42*rqob5&06do9PBtPSPzm?pW{C$ZGby6Y!IB`_cPbx^+I=gfji z0p^!5Z7YBz@9r;F4XYM$_!QVlg1k?k#9%B=xmMnr!ox_5@1@nmo&bs~taH7z5 z+Dcb^oy%%Revx84OZyf`kvKivVz48+%OTGjlLTit3LANO4rqNvV%$SC?*b=YQf5t( z8>+0WE1Aw!?kjtU@LwB1KdHLoe36=G<0VjmtGZH zI0IrS#SF1_P$$U#st&oN1ISV&AlYC=2k?#={;5xVWOkD zkI$QEPycz6wTHMqvV39_Ra|)Oa^aYiKPS`BHp-;*>evbAY80zsslyQ~Tg}pS2m%{N z8E_7V?12(WIYdT-i1MvLmj&I&$oXCUu{EGRsv2-hFvS3cA)nnv??E#y!uW3Rk*2CJzH9)oJ z5_L`YLP9z__FiO=YXmhvKhPp!z_F8juiJ{b&i z&nZ#a88S|Ecv?+loc_v4A_ium{;9xj@BGDFAsSvG`Y4Ip2(H;S#6;})9t>9ICRcN5b&$$|M=;hcSy02tBe}xb%h%v3g^#(BxPzKeNQJ~* zEZ<~fHii_*xv7;fG$qiF>ow5rOd18t~z14SXsJ zHlBns!#&OQn>e!x>$LU{a#cPy*r}*8Q3EZXLNRH!A-{V83Q#7m#OubIleV%aiT9Pc znq3M@do11FU{`mxb7#Cx7n>q8ubRi!IE|{|v3|Y|Zdc{7+j5x*rvbl`y z`~+cv37L1d2%_^RefM_x2|Ybq{(LW^BUIf!&RJI%`JwkS$%hj1Y|x$@{%a|N#W@;wc~ z`Fpc^idm*-((i<2DnRm6^F>{vn5k$lS)bb%H(A3kJTDR(D=PY$To#h)@ZDr8D~#L> z1&b~}Af!jPM9A)1+5+nkW5;ezHe>irycYW3@T=b)o6IPirGi{nC}5bU<4W0` z@NY`$dH~vr{S9(QK80;7nFjI8kSq^AdEiXUBg^z$Vw*nk0Xpc{-n(1(b)P5D)a{xr zzBOnfJ8pj*)uzfv{KCx$EVLNGj8}nsBRZj_?=)M=bW!eFftndUj0?%DvS4OCTC#lS z81a=0?NC!^XEuL25&Pu$+lvd}3J9>j+PVvCcbMz)L}3Mhi4&$&nY*!yOa85mfg(7NRa?% z9*hKS0nqgSvi#!&r}iaiwt`c|Q)y{J1qO3uT59mN08q6LnCOXy92ZSa3OhFLt;%@{UlbfV4D8zF`w>}RNW-5z^?mEuY7q> zBtt1`q}S;m=!8_6i>wA&nHUDKY@qd%7`-RtK^>OaHa>D*4BSxik$qOVUkGB)NVtY{ 
zG1$p-_%nuki)q-UgZx$oP5Oe2HL-I@K-A;O$?IMqb5%wLdt)H&>r@Oc1&OQz9R3OH z{lQy#zP+-R+W5_uE5&S>6U4?fMOkmMV7h6G#e3H&r2nH3dg~PS=(-Z$xCEu$?4l+0N zXWolH>kPj8Bqe{n1B+0xve*owL9tYOWE^E`Ykw(sUuMxtES=q(c|KyqPDDAGIqwmI zNUBWty)B8fT~9Tb88Hl~sk;jQNHN1UP%mT^2iu0f>f7w_5aVgZ+IV><_&@Lb z)zC|?^zW8j`k|Me9cX6s-wnhDDnR#KMi>5aXa8OJ|3eX2&-T)dy!=1xP^e-195m{G zEaK7y{;L*rG~l~j0O9{zq4(W&qVK_gJ*`Ud==J9-_AgvR&0>C}J9j_$@D}dsgY}qR zFNU$YuL%Nvw>h@V%NII-#BTHJ!8uahzv+`r*(Bn%{QT0w&-w)LCDiVY55r|`Nd$xsSAUNk(i zJt`KQHWt=IE{XI5G_ zOB>b;U;lnf*yg&VM-4=>(pI!@H)khhDz~P(*_x%sYnLQ7TfWHfpxiVY8P(SQz`pjp z`ZiZH2AxzkdzWyAZrH+lORsN(7CF$X%h$?iJKpA6cUY}IoiKWYRMyqylBSK7E59@O zoIGi8=U{ftgZNN|lUry6@hd=Y&d6o!lYO-i$Wz$;E<-rBj{zQtOza#LL{My zAE2`$6`|$DfGya)hp~&kwdS;^fr&?Btj2z(NU;3wEp>(hEis8m3LXeHLILvt4Cmm<%2%P_P>iK=p2_&2 z`4(CHR^-?Csqj*tBY0`<78y)HN{-Q^{nv5mm1}y@W9+ooAa4c~Dw#6lc5?!nxB;xk z<4EJif4>`%AxFa-&@p{Vo=!*YiJRfIzuLK?5Y0p#BP_#ppG30e;V&X8g}Vq5C#@2b ziNf6?J%7uRFG|-a2a~DFBW$DkrQYI%29O(3P;cVfcRoAIg{Z)UWQA$)7Q zp&{q+PK3{H@=T{8BpaEZ+m|cDUDE0%H%Cu*^l7(f>Nvg5#dPvRBx9oGXsK7wjqI!{ zMbqn2-KXNU==XxFFpH;ljGsru6g} z@wn*Ry~G5&8HTPdXlh;*TDi{=1kbOJs<&z8vacm^zuhD4kbf^V;>bQuy(;G&%Vp+R z%g~)!#WnuO>$8^&=6GbCt;u!;Rs67rz(W1$2-7QNkgs;~3xJ-}8f~tX_VLltV?!w`2@L39 zQa3Dr`POqm)z`}Nn&VXhboX^5!+rEmI;w*%Sw!fPj(UXY*Y`C~?bo(DYBICIpvy&`mAyrwAoLY>s|17*6 z<6mHsB|oCtY$1oCrhFdxO=&|bv6)Q7^|-=fk4E@Tw+#Dg20AF85q&+@ z*4L%gwpQ0GuztI#{;5|(p%(XZ_aVnF`SrU)!8>)9!{wS=^*Pf&JVoT!N!RNJ&E%4y zGh2;2a$a3MqTS?fc!B7#X@{Z)L{Ya&rP#_;*Tza!#=-0f9_bP4*g#;h&~tWA2=b8v zuTMM*-qI{LOc;GAJkGtU<@+@kyC+|+UgNi#*=?d;2F-OCtwrfpaH9S0m>UB?5>rIUT)tCFmUz0R?m1b6t?nfDt#G2Fk^`uMIh;Sa-BA zKuw8XEGa2?KCZeVs)Ox3uY6|C>M?BKjQ`_vnfRMG43kr3F?XW_LewC#Fy@8O-uL*A zR@HJ4KP#&45YRyetZE}s-Y+oeR31FixnuD91H=Igr9}&DpQ$2X}1I4o^ z_B`1Hk>-LVR@Hm<<3xO=z-C@iCDRa|j^qGqgPYG--2+-`>yU$~1`B2Cx_1h^EWb+& zZ-09&PjQ!Vhwb+=Pp4c)Am&N+o`3gzAzEHU`U3-@h+q(oMaY5uO2b;F!U&5+AnKM^ zAkza_q-(nh`n-4QKL$XbY_WRP;W95Q4$8Vs8NBuNc@VZ_xF~;VN;A2<_zJ6}{%6Qa= zPrq){O+F4CZ4#}t?~k$#FHtc7`L zHhY)sraiNL*@!{+5F`^83OoT4TEB$f$tUjW8``$@77KWySD<@Qr2;QJf4&l}8D)B+ zCkSHowW?>4^yWSpTSIBlpGH+oiAB_JZg^A|S+IA;P%YM_OT8KNR54o}ROq4Ro?beA3d@IKpWew{*@qQ2$9t9ox)8^Mid+FHA48M)d6QL;8p>X?(`&45J@eRt{ z1d3GOFo0;vbCW)5K+dqg_NLX(dpW9>;}G*$>NUPa9P1cJ^-qV?-KX?@!1cM!$1fxhM`siTb`eqFN6;Lk{_Ux&~y_zmhed8Eh=!F#T4 zg$`zGbT$5QLx$&JjFl83VrLljhM%=BV%6K?D?Z^r{zs$gNfeC;Z^N&n>BJsFk>wbr zg-~IG8VAE#prpSpw%^OAzKrcwX}#p;u(J)2+=EeZHP-Z)d$`a!Vb@h%ICS18T8B!P|3OL6+F{JJ$9~f!oU(i@+M$H zM6lzI%oLd{P9V3e3(;1~gc&~#X~CCt2yw)$jL7(iVJf_F@#TI&tj;GG5L@xm5lR+R zz@f^v3xx}|*(gne@L3`|gojvP7B491VR*Oi?MEOHXJk@WBb;nZI0s{ zZq{RQv-2$*qjW(bCYPslNg^|-gX zG{mPaB-Sr#J#Kc<-+hLbSY2~XLoUa5|&Ax=VV zEdF2v0U(LSgZ4MbSVimwYpCLP_OoOgM<-*RC~-DBmle_Up9qOCx9j=FkvI$SLK zfXZzx!`LScB#&2~OMoU{W>3`6;^GJwTB%J}BzS+Ic=?;rt>TkuqJOSGIJ*Edsso!|TGkd>oU+z$cz zWa>0#HuS?EnU&aZbjPf2_VVZOthMx(^|5SORf8EY$XsjJ62^3eSYClV+wu*Zor%y( z=2~)yx6=>3HVT4qj3_GXUgwk&X~;J?^VL|}09+&>azKZ1Kj5?R{;?LY+!bu>G=$(s z=yUn^96g;>L<|4U+=jt9sWvx7=T+l)WxqaS!OjAW^!zT6e8 zO7up+{JkD>zJ09>p-yOmdZnUhA(|hLx&M=}15MyN3>Pgm67{A*PHu&i9Vx~hZr)8F zn7kdOxi(bHk+;Dcp^xL5?Jb55JUO)zS&fiwVLV*I{$=H)1@Vi*QJ-u{9t7_~GANgLa`h|y zJlv0xZFbS06zyfeu0RZN}&Pd^|kGUQN2`Y@-<}nuF&F$1zO6} zx$^=BWPC|r^57hDBfjgE1a05@K83hw_YNt$sf_?zXv1pxNro z&@xF^Qg%KvAU$c-tB=CaLf!pz!T@PV0>k>dBpk*#c**$9x;Gxs;QTDZgBSYc*{>}9 z%$?^oq8V&d$TKitJ$-WaA}p|w_ervGc3n8Q_wzn16vK9BLXAvc^Hhd}7;nlF9jP~| z6z|M0>e#Kc*Fdb_6>M-KpgcbDt0@Uq{5<_M*`X<2pG)I`&zk2D@`kKbLPt}ix}oli zN7D9SGs@{P^YB9|khTX<4G&VnY;fB7=1zo#jI$YhPtYkI0G|t7d>a zmcPviGT5X!rE5t>}#Ni+~R_a zFDDDtSALBLSI$tSwArWCfzm@!Q8rdW-sZ#flNHswLWzZqT8>wKMYCB!c_36~v`*kv|! 
zGgfab_>&6Ri^MBI6dUx?3N9$jo=D#_l{UuHxKhIFkj{7sA$ETQQQSGEhPp9|EIzy} z%hkT}*92(liysYZHmX!VuyDzTudlniiKcTlNw0KY)WRnHN#xRTxK&RiLd_QZQ9LB# zUq$xrpUNChF}+C5Ro2vHiHeP~)_F^20+`;=q`NtG@x{nHi~jxbX3#siJS)QT-LD?* zKwV<<5^hK^c2OipfiESFpCzNPy3bAiejFmhuMPL3_@IO4N|-%EibkWVD>HA_-nCQC zwSK2u^4lEBv!9BvQGjGT$#vKD&!$YqMP3tgJebtW4W{Fzd3`%NeO{h|E(XIAm{O(S zc60nnCxR*O_A_H=`wx`SULtS0EX0$dC-}~tl$8H%dl&(ZFQ;pPA7G|rBYglJAm_jb zuuV+OSblA1>b<~OM9kaG)bjL}H2SHvL{GY{UF}Wr=kN#QkL>H_x&T1PzAiLQi`FmM zd>A}`f3%{+WmZ>yGl`$z=DOn{SbL1Adb4g4L{VJ7X?XW}%?pV9&#AWKH6azN*fEO_ zx=B26pUR~_zZlT$m)x~d7}7DL1Jk3sYasZYVeCU1@@aV+WC(2(A5=C~HhByAm1dEN z=93jGl{ZJuURQ64R}gMo+zKvp66s2;$6ASAGaE)HTIz6;wlhfQ>0&|Qmxu}J1_ z-8rWm_s|NbZ1r6RNU)p})rq7Vj~p(sct>xo9Xb5$y`57eoF$0P^bK3y3pt`Mb<6hE zjziapr7p^!LZ29w)*V^*RvIQTY7IlLm%sxpE$9^~#u%d&&c)X66ukXo(PwOxW}h;! zsGClMKllEu+dgh_;XVr$kzyb*mwGzIO8epdW{H zCU!$5ya+qrg|NO9G3D6_nYNp@{^5*HUo!jU+uKx#4pNbxiNC;vG(5HLcVfU7F{ierusWvIA60Ecb$qy(ebftUngs_#)Y5)62uHLuxri zwN+7Mnh^t%mJ;twCmSx8_h5++(rh05lv;C7qVbTl}1*9qv!>Dsv)DGWE* zn){4%Z>-Oz!`If`)M??4ZJI!lWl;6{-S7hRPEDX_l zxw@3!Q@-ZOopyP9O;y!koQ$HX4c+GKe2ft|N~v11-vvdhW|vdontfBnagj`v=<%4@5`=FOJp4?9z3%iifS;Zc{QIgdvwv;dZdwWBxr8nL`ywve_wORWzg@bw|9wTF z;a3mya}Xr4s((Hq+R!83n99@>%55eT-Tau+4=|xBZrsOT7eeW1CC&z;JC0ITRHl45 zA7Tk-wd5VPx0ctdXd-tzbni|&s4x#=Sxg|7W2P+#DAToXS}Ji zg8g5()PKCE1Utb043eOoUwu{V@Zq9stdw#R@w7!`XXDhV7x%s;jx0cFQLrfzheJsb}OI$jd6rp=0;mX5wO6o!^z zXtZSqwNV=3w*FLTc4TQITA8bk={Ch9Ahpu>Nlz0bS9AXso&g zkY(Neh60(`o()aav`02E2q4`c=dgYjEA`5gDIsC(Np9snK} zE>M=yOZhkd@ss>X-TI~@jddVMuKgG1b*rw91E@F@8`f7}cmXMJ6F>)8I2<>|a!$hue_2y* zkT#;Rq(&f4o%572z8eF?l_G`SCL6;S!AS&kH^ ziVf;`9aser1rzh&IO&sV42#Qhtb7O%lIz$9YI@o!g7$iAUc)c3C&DcfIA9e3z&o7I z0UOaJZJzi_*KxZ1V+ zlV??@<(v!eL$y10VoLDuao*OimbjM*a2AL75au2oN3fcjEa$769Hz>*m`zuMUS4hI z-5akd1Zam&@{$t4vp-E9zuHd!Xx;N;w<}e^WDyxAk*-uj1oN12k}qbD=*h>?1^f<+ zL%eZjw?QybsB7AfE!v_FN;N?U~M=fa6m_>bFrv3q6Pz+>$5X-A^rg8xUU>462#)W*-agv2~Lw-0%!&6JI!*c8u2YS&w;*Z4THKvso-N{nqr z--j}PH-4J~(=WUfsWbkOmiu}BTl9+jeYY10BqO0C%E@EkoMwZs`!ei3<0bm!8()Gy zCBIYT-)*7!q99&f=bmxTxC<+bGPWv4I2>UK4ge@OGN)XX6IYtWPP4Yvo?%r{PA+}J z`lE>-tP*=+yKSUuBQ-*~?pqo{m65B524e~Ljb8;Bd*7t-TnJtWz@dW=xQ=Mg_;@BQ z?N1$UtPl8=h($cDx7M5ZXl>?eiOi&QZ@+7QG`i_kDf;HSas{`O;+9c0ckn!r5j4vj zo`X<5a;&89V|kiNwlklAmq{8$GC|p2JO^CzL=#hFYrYDGozh>z(NzQXx3p z=G4>QMWM8v?bJ*Xd1+*cqC`slQEzUp2I-jg4uxj`lsgph(@xn?PEmgi^? zB3ggelxF19k)0_UzU$M!0~i$WwHNNbfULJhx%(xNP}Yg}pyItcTAf@ai$^?@^iZ>q zx-heyg5}MjwP?Ekm(vA+u0;*_oiYmA7i`ZXU z^kyIsCSN`UBysZS;Hpv-?OkA@#Tv0kW8u~!Lbn|>PRqYptqA9jh$J<e?17O*h% z_R8t0%gYz1;-g_Fe5ZJF*Ridk5p_7G--AX%Og- zP88~+x#pN9f@q-Toi8hsZJT%*w@r`9r*j#UBcqcB{jYfPNh537(5idA+u&p`rspKU zVEB|mG|EWf88_)lhD6@h^D-BjhFLW?iH{klpKKqAr|f^slArqMKzsU>3=;LW@LbuE z7}oG;vToyRiq@Janp<{@0b|9H=X7A2z!$qMfY;SiFMVS)PtYN4p)>KLYOTN=sbuqiB_u}!@M}91`Cj^Jmqm>lm z(nHDyPtMA@4s}QOE!A=>TcX`bEgt^9vlbDfIu!XT%va=ih0J_#(+E)QG-vnq*M=f? 
zd#1xJ%uWPEj{r}$SlO|1Lmg{?t|hiaDjH>}&gN`T3mUBrxsPVnc*_2CPC~UZ!&#OmL~y8|I{z6=H!gc& z8pv?N^W{LWG5ew7yWO4*FxKiq8frCq+|Rg;UHSxa4|%hQ2f^gmgtwGF9WPJaWtXc#p1s4h4$jgUzc8Z~k>A1he#Mk88n3Xx zH}Jkald}L(q?ftQ2}@35gy#3`dq?7a0mp-)E{1Z1CD`4ZN$x!vp_oSAc{)qgb6si5 z=;A6XUerPFUxPSfdvTWSW|WdBTeadQ%RXS5H{SpD{I=UD;4~eRz@jpRe2R9JK@vL8 z;+^-(A_@>nYjtc&LHR3TSm-}Wi!_$f1N#NK@qPj0S=v&2w3 zD11A}Zum1NE$g!5hg3XCDn8eoB}Y_eu9FamG=K3W$Kxdr_o{h?FPHlia?kV5TuGA#Ry18rCUH+ z#(;E*6h(*-1JYaQ354*@O<-W&Z@stHTkpO6W%_Tb(oz<*92Vs{Ca!&u)Yx>uHFX}-wPmw z3L*~*KdV;<`Uk^PWBnNOmvAdl`!i3zBN0yd0Bi8 z^^%WVxZD@_3oUV4MzI&VhwRCerj2Q7;~8{c=iJ_nIXGWk`-NZDc9T-aY1oyI=JRGU&t%p|~Web0&+ z@Y`ew*eebVej94>9UU1q3sMtlO(|LRDZb8m;c#Go+fNP+tGTN8rcp-ycZQ$%-y9vr zDMsy1%CPg{1Q08Rzx=!4Tv@4aSt!UBR%EdvOUp|@)zm{FDr`VC;vHg=thk!twWFGyM}jFB^dzb{f9xWV5&!bbb2L=V{~Ziu_Wy~ zt=-YWw}Ol+qR5$M15;WY#baW}`OpPjGC-}Q=*V4C!`lzFXdR1Pv378oGHalvKB-yc z>;m_+qNr737kEkDjs-jP2i0dvHS@kE+3(4hoHTB}EFBfCII+Cp7L0HjI1_*RN^MlhBREutk}C9sDE}$o&y#8c)-C zc&tADdApGw&o^gPQp4q=Uvpaocy#urbZJLVy@u@ya6CkG@06?IX%^YjsdurTb67eg zthM2uAueX-ygJ)$L9OC!ck7PlZ)gc?xx3$8tUot4hC^CS?G;1Z@|{Sopmh9_-J+1@ za;}3S`Kd>kAl}!ZC*G&gL6HZ9>qAF;AN@S4&Jh>CgTG}mZrA*ZEPw7t$9Rjc_N}J` zOgj+4A}H>?#^MQ`N$Ky&uGu>2g_-PoP%cl%gMQ=pgzxuUtTMaFeVC_V-bJ6H+_eLB zCtNzKy@_(EYYJo|KTRuE>r>ndF=cKKSiQ&Jyg-o*QjB_Y@7?OrZn?=N zz5KyvhQcc}QLizeq*PE(jTIIwPLQ@eaXci=bEG2SWP!yoRibJ+ z4DZLnHZDJ|+^mCgeHx#6(>M9tpvGh6=bheNC0dF(M(muDEQykBRMYB*dDm8r&PZz* zb?b@$(zz5X)P))QU2S=HTd3<}yjFz7j?M@uiUo02-i);AGtTwl_m5_cZ4#YVQ^jfJ zWwVpX4z{mHYnE8Bhg6;Fjy2f@{Hh!_Z4&3)em>lzwobcJ-QEu>5h*ay8{o*uemTmIUNLuNILSK5=8nyoY}27+C*siX zUwVd5?UY0Ql`A(u-K(Cx;B;U|ogOQZFW1yONba>r!tA>T(IrI;(GIF95-G`DZ>$HH@O!(h>Ap4W?Q zGH&e4vAHLp{m1Mn_2s4a?(eSf2YFT^tS~5b?e8j@iX3M=Pn&T09y|O{^7PVdakI1V zK0r)Tzb(w)!xyBh8Ykc=8sqQlv-r|y&*WW~TTUjG^Tyu$(Q@G@k)L@&Y%c`hYE>6S z2MUcTJAV5nF=vTr`Awst|0=1$z5Q+IZuG7_GB1Ny;szbkpw|;vB`yj5AV2-Fg44pF zy3youd_N1e@+S4|{bViIMShf&Dn?b)UdNpabqpn}N{344K!_QjvO6_e=d7 zpfOHy7a8mw;XjKv+IWd)S+io7{`z!3biLG(Md!BHM`dFVyNiNI$v{KBfj2VyqwQV3 zUPuT{_sqPZ_+8%TQge)qNG-?MH~E#|HQwFM(=VbvFX>j0pFb0(JpJI7Nj0tVZ>xNB z;R9yMNjzq%UG#0bWZ!8DD!#DL{Df0Rx@M(NVL?^R%;e|ap=kp0Uw!@Ii`R4>h;S)I z@hAA3S9iyHw0adNW~+C-?RsH&lWv9*D`jgsrET@L>gH;f{l^iTuemi20SP6D`pP!v z;p|lWgoTdvmoHi|IkMQcuCuEuM1Jjv*=I@Stsh1%rV3RGL=Gk5WxpRHbvFk*4#Qe0 z$eK7VUq#0)Ul|YgyJbzB$Dmq}o|A&!egrSa?o1|HI+1-xLSo?2j>Y*ftW%uVLc*E6 zFu{W)nXhj_8N{3B#@_t4$lovRYN>jVtmn*Vw!W>N?n)T zUH1)FY!_m7F8c_;daXw)mb|{mR}b91&&1jZp`4^sCp5(7KB^lwp33o@nZ#HD>^gIyHcU{FD0+L!qs3jBx1j?=%p%i1!y_%r+!5+ z!Q_#hem=?xhh*?78^i3BY^{09gMa? 
z@`aIZ*(}q6G4necqz^`dgxU7LH@1oQZxDAFCtc)X=_#!ujU-;_V!+W-YsHB>q z7AB1PX`@s*cuLbdI0~a4ViYISop`i*qU{c#!G}tDE9{NAMc^-MFS5ldIgWP~+jZ6# z19%m}L~ku=pmCI9r@r1|m-%5O@Qxq>AkXSBG~0|1lwO)(BY6YCfl!1wj7QjK^n((p zw5|P|u2=ig;9_V9P!S(m!_SC#X}wo9*39~mG*MqioY!vQ@(!zRx33S5P^ggAS|B&e z{+=k0;D2!Cq$C7>%`O9Uby~PbT`b?vGoal6+mIbuDkxL*N~z~VOCts^2r%TJX|S~h z23EE!`+S!hd(~!l*n2=P#4SV^U}YrE?4%sqhZmhW zAwzG&s;+Q~(+c^OGw1}@!s_E^ScX?cf4u{Me&yyE)QDn|$Z%U_^}fC^gJ}NjOZzt# z3s`D^J9Pm!16cD%c+h~%&j7B0#9^V5-9WHX7H`V(jaxP#{!9A95r+hh((bmZPW}6H z@Jao5d&>6!gVp^V--eT^V50>G_jTJRD{Mxv0( zbHX7-*s=QF^3m-9zzEMRxJi?ut-Acqnlif#kwE%d5<&nTtr0y2Rh*ZwD-kZr|6`^J z4Y-+s(?Pi5qn8I91eESBeRMt=NlIrKx&?jukB zIjA9I*Wblw+q5R~?*R3O20VOl=zg)pvpYD}TTXL4|9He!jkZaBZ063kCZ4e^{iAW_ zh6v_wGj$5eshCdf40yU4rOsWYL$?mY?+hH7$2@Klb?@VGH46|80LuebU(nZuVNE#pLu23 zSfyP)Zw>86Oz9pMqa~h@pkH(R*wZiWjP>;+4>4Gl%=KU?xBB?fx$*_m)A14M87EYb z9EG&0{DrNiv-nzSu&b5WxZB1Oa$TkelgqSWft9)|TT9I25j(cDu~<@wPF!CNTwL!Y zdR%Lq7sW?hKW0l83dEG{wx;uDGq{q>Yj!9BrP##~PYMCMMlht837(kq&kJX927{tRVr>nZ(;{R6QNwikKU9;BcJ(@YIJmEP`u@k;G>#B11(W=*cS=d@uTdIF=tZ zb(9S@|IT@2+W>Wx0=x&6shM7yJ#q@2JxlCx;CN3qA!49$I1ii&7!?Z88qUTQxzTj8 z&*6D=9beZBr+p%%reP6k(cA948t8e(UNcTll?k@&muj+JD)S}G&zH%rG4k(>RTyhb zaz1bgKht4?PGE)2FZ~#Uc+Q*K!5c~1{rCaFZY z*us3;x|aX+-86P?Y12G|ASjOyRgRaKj6p1N3I;%sCm~kr;v(=<6{rC+=no)1J_%?w zdL$ilvBXS4^e+FQBeoA47z1ZVqs%Tk$~?QH`<=eNMqCd`qy3bzI3^TGn?;d!1IeE- z0-e5$6`8JConEN`o_1H_&DpT^5ZRDM4u$OjF2pDw$kYV*auMWVz!s$zB#~>qfh7qU zTvP=l$+?xQ?~>H-4!0%&%d;tDtqhFXbO-#G<2-2KXYC?y@N>DlX6daG5b7>RW6^6k zh?2UlGnN%Mjve_nH(Ea45A}jD0{I5z5On^DZJ>V*J=9g zcY(%AFr;0NC$9D{j}JDA4RGF{oJqKR8?X*g+4^5OH69pN0JcXhWZ>c~DcSrLk;yh6mQ^3 zT|Z=4?}WH#G|1uq<2R7|Akxm3aid*8Wmn+Tp-v8)grDC*wZwt>#W0EJJjFoZ|C{NS z0@CyGb&6P;2L*UL*z%#(`+e+NVekpy9hBh%r>TL8PwlzL#ikjE1`1h4%$5Vg%*i`I z-lq;L=S@}4ne+PY-tckI1~_iXn>XseYpAiI1Ens}Glled*!o@hS3vBD9c;bpgyDm- ze84=(5_;(|cjbG|iC5u*T-H?YNK7I-ESMwK#)S&(Lpk3eiNfpBDo0o=c5+=0)1pgT zM&PL);ww3($@`^*?ORenF~FiDB!S}t$`2=SMLuqD>e)1%YAI5;;R*`AGzFA63>Ge| zxvC97IkP;2zl2a-7Romjn!edbYe<87>4lvRKfG(^Q!2=Mg1J>~^F^^_Kj36u3u#Fm zCyG74%!3AnF6@v!&NBb`M*-3#gD%_02{NfZ*AK)Sv6Kb`1Dn3I{H?8Lp-rJ0DwH{J znCdOJU!E14@x7!@U64Pu6$AQ zp8lQ?Vr#L2vRYcKW5EK)XYJ-*VD;A!rv6-D63U8~Ka(|~52>hU`|U@==(92Q3$``S zWbpvinX`I!V+i;cyV+2xDFYECAeIIjpmR44A_FEaqe#|~{St@Wfn(O}43N%w04gs! zmA5wK>M|2W4+mgleygUEI*cK(U^C)@IM4N>6HX#r4saX5y;A{jd*}^?omPI*Q=?&I zla3z$oK z25O$M;Kj-yV+%}^bQ5pZ+qEWkIvxk;pqo1pzUTOUbbJ)ve^~Wajex3?DFXOLB+<=U z4&^F7QU57yai6*Zq`V%+HAs>|e;hG0)y zT9di;S=)V0JKA58^<Kw572 zM1+@3B)^*^ZB#lE?ohf<=QTpw(dcnZ$#T*{NC};?3QTJDt}oiIUrxpM_mvbPcD2sO+^Ae~iz7KUn%3Y)iuq7GO#Y~cfiB+nrl^xms%>iB4BkxiaBe6VQt zZrbVTmNTlX3tZCdP=LjOJp~^g%Eq zEJ)oR{9LB|X_+^|njY^RB~EK41W5!!qEXDFNURqttTI4Y5TV2EHKt9OQ^x`;`oF)d z212Q{o~ijCqaB!t^ec2Ic1-LaP}{b@;fky%QW{<09goSH(CZrr7iJtB-=PxbKi-*ZE+A&b3-wxSd#{f@L4Q>K;Gqn6;Ob+| z_JhLL3$Xzp6ivjY9Kvozud!G80n9i3w4P%yP^0)K^;~5C5w`R*D&196Jyb%^C_^)a z2{qjNliVVu+XDho(Zj|pJ%K-cAC2f+?BvN@0fTZRFrs6tRW6r6~(5(@Y zC$?5l>{Z#dcwKD+7PYw3;FeOb+tzSscG`kqE200%wtozpE8;T|?R>dxyzO@2@O_qB zLF?A3lCek_N5e22KHPx$C_~})#*~A2dE2SzAL!8*7{xr=F;1cz4g8N<7}^*lINZTD zbK^h-njF*Le8+=E8HJ|I>o6r$7!fJsc)EGXty86V#{^|v`_ -- Mailing list: `openstack-dev@lists.openstack.org - `_ with - *[gnocchi]* in the `Subject` header. - -Why Gnocchi? ------------- - -Gnocchi has been created to fulfill the need of a time series database usable -in the context of cloud computing: providing the ability to store large -quantities of metrics. It has been designed to handle large amount of measures -being stored, while being performant, scalable and fault-tolerant. 
While doing -this, the goal was to be sure to not build any hard dependency on any complex -storage system. - -The Gnocchi project was started in 2014 as a spin-off of the `OpenStack -Ceilometer`_ project to address the performance issues that Ceilometer -encountered while using standard databases as a storage backends for metrics. -More information are available on `Julien's blog post on Gnocchi -`_. - -.. _`OpenStack Ceilometer`: https://docs.openstack.org/developer/ceilometer/ - -Documentation -------------- - -.. toctree:: - :maxdepth: 1 - - architecture - install - running - client - rest - statsd - grafana - nagios - collectd - glossary - releasenotes/index.rst diff --git a/doc/source/install.rst b/doc/source/install.rst deleted file mode 100644 index 897107a1d..000000000 --- a/doc/source/install.rst +++ /dev/null @@ -1,191 +0,0 @@ -============== - Installation -============== - -.. _installation: - -Installation -============ - -To install Gnocchi using `pip`, just type:: - - pip install gnocchi - -Depending on the drivers and features you want to use (see :doc:`architecture` -for which driver to pick), you need to install extra variants using, for -example:: - - pip install gnocchi[postgresql,ceph,keystone] - -This would install PostgreSQL support for the indexer, Ceph support for -storage, and Keystone support for authentication and authorization. - -The list of variants available is: - -* keystone – provides Keystone authentication support -* mysql - provides MySQL indexer support -* postgresql – provides PostgreSQL indexer support -* swift – provides OpenStack Swift storage support -* s3 – provides Amazon S3 storage support -* ceph – provides common part of Ceph storage support -* ceph_recommended_lib – provides Ceph (>=0.80) storage support -* ceph_alternative_lib – provides Ceph (>=10.1.0) storage support -* file – provides file driver support -* redis – provides Redis storage support -* doc – documentation building support -* test – unit and functional tests support - -To install Gnocchi from source, run the standard Python installation -procedure:: - - pip install -e . - -Again, depending on the drivers and features you want to use, you need to -install extra variants using, for example:: - - pip install -e .[postgresql,ceph,ceph_recommended_lib] - - -Ceph requirements ------------------ - -The ceph driver needs to have a Ceph user and a pool already created. They can -be created for example with: - -:: - - ceph osd pool create metrics 8 8 - ceph auth get-or-create client.gnocchi mon "allow r" osd "allow rwx pool=metrics" - - -Gnocchi leverages some librados features (omap, async, operation context) -available in python binding only since python-rados >= 10.1.0. To handle this, -Gnocchi uses 'cradox' python library which has exactly the same API but works -with Ceph >= 0.80.0. - -If Ceph and python-rados are >= 10.1.0, cradox python library becomes optional -but is still recommended. - - -Configuration -============= - -Configuration file -------------------- - -By default, gnocchi looks for its configuration file in the following places, -in order: - -* ``~/.gnocchi/gnocchi.conf`` -* ``~/gnocchi.conf`` -* ``/etc/gnocchi/gnocchi.conf`` -* ``/etc/gnocchi.conf`` -* ``~/gnocchi/gnocchi.conf.d`` -* ``~/gnocchi.conf.d`` -* ``/etc/gnocchi/gnocchi.conf.d`` -* ``/etc/gnocchi.conf.d`` - - -No config file is provided with the source code; it will be created during the -installation. 
In case where no configuration file was installed, one can be -easily created by running: - -:: - - gnocchi-config-generator > /path/to/gnocchi.conf - -Configure Gnocchi by editing the appropriate file. - -The configuration file should be pretty explicit, but here are some of the base -options you want to change and configure: - -+---------------------+---------------------------------------------------+ -| Option name | Help | -+=====================+===================================================+ -| storage.driver | The storage driver for metrics. | -+---------------------+---------------------------------------------------+ -| indexer.url | URL to your indexer. | -+---------------------+---------------------------------------------------+ -| storage.file_* | Configuration options to store files | -| | if you use the file storage driver. | -+---------------------+---------------------------------------------------+ -| storage.swift_* | Configuration options to access Swift | -| | if you use the Swift storage driver. | -+---------------------+---------------------------------------------------+ -| storage.ceph_* | Configuration options to access Ceph | -| | if you use the Ceph storage driver. | -+---------------------+---------------------------------------------------+ -| storage.s3_* | Configuration options to access S3 | -| | if you use the S3 storage driver. | -+---------------------+---------------------------------------------------+ -| storage.redis_* | Configuration options to access Redis | -| | if you use the Redis storage driver. | -+---------------------+---------------------------------------------------+ - -Configuring authentication ------------------------------ - -The API server supports different authentication methods: `basic` (the default) -which uses the standard HTTP `Authorization` header or `keystone` to use -`OpenStack Keystone`_. If you successfully installed the `keystone` flavor -using `pip` (see :ref:`installation`), you can set `api.auth_mode` to -`keystone` to enable Keystone authentication. - -.. _`Paste Deployment`: http://pythonpaste.org/deploy/ -.. _`OpenStack Keystone`: http://launchpad.net/keystone - -Initialization -============== - -Once you have configured Gnocchi properly you need to initialize the indexer -and storage: - -:: - - gnocchi-upgrade - - -Upgrading -========= -In order to upgrade from a previous version of Gnocchi, you need to make sure -that your indexer and storage are properly upgraded. Run the following: - -1. Stop the old version of Gnocchi API server and `gnocchi-statsd` daemon - -2. Stop the old version of `gnocchi-metricd` daemon - -.. note:: - - Data in backlog is never migrated between versions. Ensure the backlog is - empty before any upgrade to ensure data is not lost. - -3. Install the new version of Gnocchi - -4. Run `gnocchi-upgrade` - This can take several hours depending on the size of your index and - storage. - -5. Start the new Gnocchi API server, `gnocchi-metricd` - and `gnocchi-statsd` daemons - - -Installation Using Devstack -=========================== - -To enable Gnocchi in `devstack`_, add the following to local.conf: - -:: - - enable_plugin gnocchi https://github.com/openstack/gnocchi master - -To enable Grafana support in devstack, you can also enable `gnocchi-grafana`:: - - enable_service gnocchi-grafana - -Then, you can start devstack: - -:: - - ./stack.sh - -.. 
_devstack: http://devstack.org diff --git a/doc/source/nagios.rst b/doc/source/nagios.rst deleted file mode 100644 index 72d2556ca..000000000 --- a/doc/source/nagios.rst +++ /dev/null @@ -1,19 +0,0 @@ -===================== -Nagios/Icinga support -===================== - -`Nagios`_ and `Icinga`_ has support for Gnocchi through a Gnocchi-nagios -service. It can be installed with pip:: - - pip install gnocchi-nagios - -`Source`_ and `Documentation`_ are also available. - -Gnocchi-nagios collects perfdata files generated by `Nagios`_ or `Icinga`_; -transforms them into Gnocchi resources, metrics and measures format; and -publishes them to the Gnocchi REST API. - -.. _`Nagios`: https://www.nagios.org/ -.. _`Icinga`: https://www.icinga.com/ -.. _`Documentation`: http://gnocchi-nagios.readthedocs.io/en/latest/ -.. _`Source`: https://github.com/sileht/gnocchi-nagios diff --git a/doc/source/releasenotes/2.1.rst b/doc/source/releasenotes/2.1.rst deleted file mode 100644 index 75b128815..000000000 --- a/doc/source/releasenotes/2.1.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 2.1 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/2.1 diff --git a/doc/source/releasenotes/2.2.rst b/doc/source/releasenotes/2.2.rst deleted file mode 100644 index fea024d6e..000000000 --- a/doc/source/releasenotes/2.2.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 2.2 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/2.2 diff --git a/doc/source/releasenotes/3.0.rst b/doc/source/releasenotes/3.0.rst deleted file mode 100644 index 4f664099a..000000000 --- a/doc/source/releasenotes/3.0.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 3.0 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/3.0 diff --git a/doc/source/releasenotes/3.1.rst b/doc/source/releasenotes/3.1.rst deleted file mode 100644 index 9673b4a81..000000000 --- a/doc/source/releasenotes/3.1.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - 3.1 Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/3.1 diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst deleted file mode 100644 index 9b4032fac..000000000 --- a/doc/source/releasenotes/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -Release Notes -============= - -.. toctree:: - :maxdepth: 2 - - unreleased - 3.1 - 3.0 - 2.2 - 2.1 diff --git a/doc/source/releasenotes/unreleased.rst b/doc/source/releasenotes/unreleased.rst deleted file mode 100644 index 875030f9d..000000000 --- a/doc/source/releasenotes/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================ -Current Series Release Notes -============================ - -.. release-notes:: diff --git a/doc/source/rest.j2 b/doc/source/rest.j2 deleted file mode 100644 index c06c845de..000000000 --- a/doc/source/rest.j2 +++ /dev/null @@ -1,586 +0,0 @@ -================ - REST API Usage -================ - -Authentication -============== - -By default, the authentication is configured to the "basic" mode. You need to -provide an `Authorization` header in your HTTP requests with a valid username -(the password is not used). The "admin" password is granted all privileges, -whereas any other username is recognize as having standard permissions. 
- -You can customize permissions by specifying a different `policy_file` than the -default one. - -If you set the `api.auth_mode` value to `keystone`, the OpenStack Keystone -middleware will be enabled for authentication. It is then needed to -authenticate against Keystone and provide a `X-Auth-Token` header with a valid -token for each request sent to Gnocchi's API. - -Metrics -======= - -Gnocchi provides an object type that is called *metric*. A metric designates -any thing that can be measured: the CPU usage of a server, the temperature of a -room or the number of bytes sent by a network interface. - -A metric only has a few properties: a UUID to identify it, a name, the archive -policy that will be used to store and aggregate the measures. - -To create a metric, the following API request should be used: - -{{ scenarios['create-metric']['doc'] }} - -Once created, you can retrieve the metric information: - -{{ scenarios['get-metric']['doc'] }} - -To retrieve the list of all the metrics created, use the following request: - -{{ scenarios['list-metric']['doc'] }} - -.. note:: - - Considering the large volume of metrics Gnocchi will store, query results are - limited to `max_limit` value set in the configuration file. Returned results - are ordered by metrics' id values. To retrieve the next page of results, the - id of a metric should be given as `marker` for the beginning of the next page - of results. - -Default ordering and limits as well as page start can be modified -using query parameters: - -{{ scenarios['list-metric-pagination']['doc'] }} - -It is possible to send measures to the metric: - -{{ scenarios['post-measures']['doc'] }} - -If there are no errors, Gnocchi does not return a response body, only a simple -status code. It is possible to provide any number of measures. - -.. IMPORTANT:: - - While it is possible to send any number of (timestamp, value), it is still - needed to honor constraints defined by the archive policy used by the metric, - such as the maximum timespan. - - -Once measures are sent, it is possible to retrieve them using *GET* on the same -endpoint: - -{{ scenarios['get-measures']['doc'] }} - -Depending on the driver, there may be some lag after POSTing measures before -they are processed and queryable. To ensure your query returns all measures -that have been POSTed, you can force any unprocessed measures to be handled: - -{{ scenarios['get-measures-refresh']['doc'] }} - -.. note:: - - Depending on the amount of data that is unprocessed, `refresh` may add - some overhead to your query. - -The list of points returned is composed of tuples with (timestamp, granularity, -value) sorted by timestamp. The granularity is the timespan covered by -aggregation for this point. - -It is possible to filter the measures over a time range by specifying the -*start* and/or *stop* parameters to the query with timestamp. The timestamp -format can be either a floating number (UNIX epoch) or an ISO8601 formated -timestamp: - -{{ scenarios['get-measures-from']['doc'] }} - -By default, the aggregated values that are returned use the *mean* aggregation -method. It is possible to request for any other method by specifying the -*aggregation* query parameter: - -{{ scenarios['get-measures-max']['doc'] }} - -The list of aggregation method available is: *mean*, *sum*, *last*, *max*, -*min*, *std*, *median*, *first*, *count* and *Npct* (with 0 < N < 100). 
- -It's possible to provide the `granularity` argument to specify the granularity -to retrieve, rather than all the granularities available: - -{{ scenarios['get-measures-granularity']['doc'] }} - -In addition to granularities defined by the archive policy, measures can be -resampled to a new granularity. - -{{ scenarios['get-measures-resample']['doc'] }} - -.. note:: - - Depending on the aggregation method and frequency of measures, resampled - data may lack accuracy as it is working against previously aggregated data. - -Measures batching -================= -It is also possible to batch measures sending, i.e. send several measures for -different metrics in a simple call: - -{{ scenarios['post-measures-batch']['doc'] }} - -Or using named metrics of resources: - -{{ scenarios['post-measures-batch-named']['doc'] }} - -If some named metrics specified in the batch request do not exist, Gnocchi can -try to create them as long as an archive policy rule matches: - -{{ scenarios['post-measures-batch-named-create']['doc'] }} - - -Archive Policy -============== - -When sending measures for a metric to Gnocchi, the values are dynamically -aggregated. That means that Gnocchi does not store all sent measures, but -aggregates them over a certain period of time. Gnocchi provides several -aggregation methods (mean, min, max, sum…) that are builtin. - -An archive policy is defined by a list of items in the `definition` field. Each -item is composed of the timespan and the level of precision that must be kept -when aggregating data, determined using at least 2 of the `points`, -`granularity` and `timespan` fields. For example, an item might be defined -as 12 points over 1 hour (one point every 5 minutes), or 1 point every 1 hour -over 1 day (24 points). - -By default, new measures can only be processed if they have timestamps in the -future or part of the last aggregation period. The last aggregation period size -is based on the largest granularity defined in the archive policy definition. -To allow processing measures that are older than the period, the `back_window` -parameter can be used to set the number of coarsest periods to keep. That way -it is possible to process measures that are older than the last timestamp -period boundary. - -For example, if an archive policy is defined with coarsest aggregation of 1 -hour, and the last point processed has a timestamp of 14:34, it's possible to -process measures back to 14:00 with a `back_window` of 0. If the `back_window` -is set to 2, it will be possible to send measures with timestamp back to 12:00 -(14:00 minus 2 times 1 hour). - -The REST API allows to create archive policies in this way: - -{{ scenarios['create-archive-policy']['doc'] }} - -By default, the aggregation methods computed and stored are the ones defined -with `default_aggregation_methods` in the configuration file. It is possible to -change the aggregation methods used in an archive policy by specifying the list -of aggregation method to use in the `aggregation_methods` attribute of an -archive policy. - -{{ scenarios['create-archive-policy-without-max']['doc'] }} - -The list of aggregation methods can either be: - -- a list of aggregation methods to use, e.g. `["mean", "max"]` - -- a list of methods to remove (prefixed by `-`) and/or to add (prefixed by `+`) - to the default list (e.g. `["+mean", "-last"]`) - -If `*` is included in the list, it's substituted by the list of all supported -aggregation methods. 
- -Once the archive policy is created, the complete set of properties is computed -and returned, with the URL of the archive policy. This URL can be used to -retrieve the details of the archive policy later: - -{{ scenarios['get-archive-policy']['doc'] }} - -It is also possible to list archive policies: - -{{ scenarios['list-archive-policy']['doc'] }} - -Existing archive policies can be modified to retain more or less data depending -on requirements. If the policy coverage is expanded, measures are not -retroactively calculated as backfill to accommodate the new timespan: - -{{ scenarios['update-archive-policy']['doc'] }} - -.. note:: - - Granularities cannot be changed to a different rate. Also, granularities - cannot be added or dropped from a policy. - -It is possible to delete an archive policy if it is not used by any metric: - -{{ scenarios['delete-archive-policy']['doc'] }} - -.. note:: - - An archive policy cannot be deleted until all metrics associated with it - are removed by a metricd daemon. - - -Archive Policy Rule -=================== - -Gnocchi provides the ability to define a mapping called `archive_policy_rule`. -An archive policy rule defines a mapping between a metric and an archive policy. -This gives users the ability to pre-define rules so an archive policy is assigned to -metrics based on a matched pattern. - -An archive policy rule has a few properties: a name to identify it, an archive -policy name that will be used to store the policy name and metric pattern to -match metric names. - -An archive policy rule for example could be a mapping to default a medium archive -policy for any volume metric with a pattern matching `volume.*`. When a sample metric -is posted with a name of `volume.size`, that would match the pattern and the -rule applies and sets the archive policy to medium. If multiple rules match, -the longest matching rule is taken. For example, if two rules exists which -match `*` and `disk.*`, a `disk.io.rate` metric would match the `disk.*` rule -rather than `*` rule. - -To create a rule, the following API request should be used: - -{{ scenarios['create-archive-policy-rule']['doc'] }} - -The `metric_pattern` is used to pattern match so as some examples, - -- `*` matches anything -- `disk.*` matches disk.io -- `disk.io.*` matches disk.io.rate - -Once created, you can retrieve the rule information: - -{{ scenarios['get-archive-policy-rule']['doc'] }} - -It is also possible to list archive policy rules. The result set is ordered by -the `metric_pattern`, in reverse alphabetical order: - -{{ scenarios['list-archive-policy-rule']['doc'] }} - -It is possible to delete an archive policy rule: - -{{ scenarios['delete-archive-policy-rule']['doc'] }} - -Resources -========= - -Gnocchi provides the ability to store and index resources. Each resource has a -type. The basic type of resources is *generic*, but more specialized subtypes -also exist, especially to describe OpenStack resources. - -The REST API allows to manipulate resources. To create a generic resource: - -{{ scenarios['create-resource-generic']['doc'] }} - -The *id*, *user_id* and *project_id* attributes must be UUID. The timestamp -describing the lifespan of the resource are optional, and *started_at* is by -default set to the current timestamp. - -It's possible to retrieve the resource by the URL provided in the `Location` -header. - -More specialized resources can be created. For example, the *instance* is used -to describe an OpenStack instance as managed by Nova_. 
- -{{ scenarios['create-resource-instance']['doc'] }} - -All specialized types have their own optional and mandatory attributes, -but they all include attributes from the generic type as well. - -It is possible to create metrics at the same time you create a resource to save -some requests: - -{{ scenarios['create-resource-with-new-metrics']['doc'] }} - -To retrieve a resource by its URL provided by the `Location` header at creation -time: - -{{ scenarios['get-resource-generic']['doc'] }} - -It's possible to modify a resource by re-uploading it partially with the -modified fields: - -{{ scenarios['patch-resource']['doc'] }} - -And to retrieve its modification history: - -{{ scenarios['get-patched-instance-history']['doc'] }} - -It is possible to delete a resource altogether: - -{{ scenarios['delete-resource-generic']['doc'] }} - -It is also possible to delete a batch of resources based on attribute values, and -returns a number of deleted resources. - -To delete resources based on ids: - -{{ scenarios['delete-resources-by-ids']['doc'] }} - -or delete resources based on time: - -{{ scenarios['delete-resources-by-time']['doc']}} - -.. IMPORTANT:: - - When a resource is deleted, all its associated metrics are deleted at the - same time. - - When a batch of resources are deleted, an attribute filter is required to - avoid deletion of the entire database. - - -All resources can be listed, either by using the `generic` type that will list -all types of resources, or by filtering on their resource type: - -{{ scenarios['list-resource-generic']['doc'] }} - -No attributes specific to the resource type are retrieved when using the -`generic` endpoint. To retrieve the details, either list using the specific -resource type endpoint: - -{{ scenarios['list-resource-instance']['doc'] }} - -or using `details=true` in the query parameter: - -{{ scenarios['list-resource-generic-details']['doc'] }} - -.. note:: - - Similar to metric list, query results are limited to `max_limit` value set in - the configuration file. - -Returned results represent a single page of data and are ordered by resouces' -revision_start time and started_at values: - -{{ scenarios['list-resource-generic-pagination']['doc'] }} - -Each resource can be linked to any number of metrics. The `metrics` attributes -is a key/value field where the key is the name of the relationship and -the value is a metric: - -{{ scenarios['create-resource-instance-with-metrics']['doc'] }} - -It's also possible to create metrics dynamically while creating a resource: - -{{ scenarios['create-resource-instance-with-dynamic-metrics']['doc'] }} - -The metric associated with a resource can be accessed and manipulated using the -usual `/v1/metric` endpoint or using the named relationship with the resource: - -{{ scenarios['get-resource-named-metrics-measures']['doc'] }} - -The same endpoint can be used to append metrics to a resource: - -{{ scenarios['append-metrics-to-resource']['doc'] }} - -.. _Nova: http://launchpad.net/nova - -Resource Types -============== - -Gnocchi is able to manage resource types with custom attributes. 
- -To create a new resource type: - -{{ scenarios['create-resource-type']['doc'] }} - -Then to retrieve its description: - -{{ scenarios['get-resource-type']['doc'] }} - -All resource types can be listed like this: - -{{ scenarios['list-resource-type']['doc'] }} - -It can also be deleted if no more resources are associated to it: - -{{ scenarios['delete-resource-type']['doc'] }} - -Attributes can be added or removed: - -{{ scenarios['patch-resource-type']['doc'] }} - -Creating resource type means creation of new tables on the indexer backend. -This is heavy operation that will lock some tables for a short amount of times. -When the resource type is created, its initial `state` is `creating`. When the -new tables have been created, the state switches to `active` and the new -resource type is ready to be used. If something unexpected occurs during this -step, the state switches to `creation_error`. - -The same behavior occurs when the resource type is deleted. The state starts to -switch to `deleting`, the resource type is no more usable. Then the tables are -removed and the finally the resource_type is really deleted from the database. -If some unexpected occurs the state switches to `deletion_error`. - -Searching for resources -======================= - -It's possible to search for resources using a query mechanism, using the -`POST` method and uploading a JSON formatted query. - -When listing resources, it is possible to filter resources based on attributes -values: - -{{ scenarios['search-resource-for-user']['doc'] }} - -Or even: - -{{ scenarios['search-resource-for-host-like']['doc'] }} - -Complex operators such as `and` and `or` are also available: - -{{ scenarios['search-resource-for-user-after-timestamp']['doc'] }} - -Details about the resource can also be retrieved at the same time: - -{{ scenarios['search-resource-for-user-details']['doc'] }} - -It's possible to search for old revisions of resources in the same ways: - -{{ scenarios['search-resource-history']['doc'] }} - -It is also possible to send the *history* parameter in the *Accept* header: - -{{ scenarios['search-resource-history-in-accept']['doc'] }} - -The timerange of the history can be set, too: - -{{ scenarios['search-resource-history-partial']['doc'] }} - -The supported operators are: equal to (`=`, `==` or `eq`), less than (`<` or -`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) -greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), -value is in (`in`), value is like (`like`), or (`or` or `∨`), and (`and` or -`∧`) and negation (`not`). - -The special attribute `lifespan` which is equivalent to `ended_at - started_at` -is also available in the filtering queries. - -{{ scenarios['search-resource-lifespan']['doc'] }} - - -Searching for values in metrics -=============================== - -It is possible to search for values in metrics. For example, this will look for -all values that are greater than or equal to 50 if we add 23 to them and that -are not equal to 55. You have to specify the list of metrics to look into by -using the `metric_id` query parameter several times. - -{{ scenarios['search-value-in-metric']['doc'] }} - -And it is possible to search for values in metrics by using one or multiple -granularities: - -{{ scenarios['search-value-in-metrics-by-granularity']['doc'] }} - -You can specify a time range to look for by specifying the `start` and/or -`stop` query parameter, and the aggregation method to use by specifying the -`aggregation` query parameter. 
- -The supported operators are: equal to (`=`, `==` or `eq`), lesser than (`<` or -`lt`), greater than (`>` or `gt`), less than or equal to (`<=`, `le` or `≤`) -greater than or equal to (`>=`, `ge` or `≥`) not equal to (`!=`, `ne` or `≠`), -addition (`+` or `add`), substraction (`-` or `sub`), multiplication (`*`, -`mul` or `×`), division (`/`, `div` or `÷`). These operations take either one -argument, and in this case the second argument passed is the value, or it. - -The operators or (`or` or `∨`), and (`and` or `∧`) and `not` are also -supported, and take a list of arguments as parameters. - -Aggregation across metrics -========================== - -Gnocchi allows to do on-the-fly aggregation of already aggregated data of -metrics. - -It can also be done by providing the list of metrics to aggregate: - -{{ scenarios['get-across-metrics-measures-by-metric-ids']['doc'] }} - -.. Note:: - - This aggregation is done against the aggregates built and updated for - a metric when new measurements are posted in Gnocchi. Therefore, the aggregate - of this already aggregated data may not have sense for certain kind of - aggregation method (e.g. stdev). - -By default, the measures are aggregated using the aggregation method provided, -e.g. you'll get a mean of means, or a max of maxs. You can specify what method -to use over the retrieved aggregation by using the `reaggregation` parameter: - -{{ scenarios['get-across-metrics-measures-by-metric-ids-reaggregate']['doc'] }} - -It's also possible to do that aggregation on metrics linked to resources. In -order to select these resources, the following endpoint accepts a query such as -the one described in `Searching for resources`_. - -{{ scenarios['get-across-metrics-measures-by-attributes-lookup']['doc'] }} - -It is possible to group the resource search results by any attribute of the -requested resource type, and the compute the aggregation: - -{{ scenarios['get-across-metrics-measures-by-attributes-lookup-groupby']['doc'] }} - -Similar to retrieving measures for a single metric, the `refresh` parameter -can be provided to force all POSTed measures to be processed across all -metrics before computing the result. The `resample` parameter may be used as -well. - -.. note:: - - Resampling is done prior to any reaggregation if both parameters are - specified. - -Also, aggregation across metrics have different behavior depending -on whether boundary values are set ('start' and 'stop') and if 'needed_overlap' -is set. - -If boundaries are not set, Gnocchi makes the aggregation only with points -at timestamp present in all timeseries. When boundaries are set, Gnocchi -expects that we have certain percent of timestamps common between timeseries, -this percent is controlled by needed_overlap (defaulted with 100%). If this -percent is not reached an error is returned. - -The ability to fill in points missing from a subset of timeseries is supported -by specifying a `fill` value. Valid fill values include any valid float or -`null` which will compute aggregation with only the points that exist. The -`fill` parameter will not backfill timestamps which contain no points in any -of the timeseries. Only timestamps which have datapoints in at least one of -the timeseries is returned. - -.. note:: - - A granularity must be specified when using the `fill` parameter. 
-
-{{ scenarios['get-across-metrics-measures-by-metric-ids-fill']['doc'] }}
-
-
-Capabilities
-============
-
-The list of aggregation methods that can be used in Gnocchi is extendable and
-can differ between deployments. It is possible to get the supported list of
-aggregation methods from the API server:
-
-{{ scenarios['get-capabilities']['doc'] }}
-
-Status
-======
-
-The overall status of the Gnocchi installation can be retrieved via an API
-call reporting values such as the number of new measures to process for each
-metric:
-
-{{ scenarios['get-status']['doc'] }}
-
-
-Timestamp format
-================
-
-Timestamps used in Gnocchi are always returned using the ISO 8601 format.
-Gnocchi is able to understand a few formats of timestamp when querying or
-creating resources, for example:
-
-- "2014-01-01 12:12:34" or "2014-05-20T10:00:45.856219", ISO 8601 timestamps.
-- "10 minutes", which means "10 minutes from now".
-- "-2 days", which means "2 days ago".
-- 1421767030, a Unix epoch based timestamp.
diff --git a/doc/source/rest.yaml b/doc/source/rest.yaml
deleted file mode 100644
index 396576eeb..000000000
--- a/doc/source/rest.yaml
+++ /dev/null
@@ -1,749 +0,0 @@
-- name: create-archive-policy
-  request: |
-    POST /v1/archive_policy HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "name": "short",
-      "back_window": 0,
-      "definition": [
-        {
-          "granularity": "1s",
-          "timespan": "1 hour"
-        },
-        {
-          "points": 48,
-          "timespan": "1 day"
-        }
-      ]
-    }
-
-- name: create-archive-policy-without-max
-  request: |
-    POST /v1/archive_policy HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "name": "short-without-max",
-      "aggregation_methods": ["-max", "-min"],
-      "back_window": 0,
-      "definition": [
-        {
-          "granularity": "1s",
-          "timespan": "1 hour"
-        },
-        {
-          "points": 48,
-          "timespan": "1 day"
-        }
-      ]
-    }
-
-- name: get-archive-policy
-  request: GET /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1
-
-- name: list-archive-policy
-  request: GET /v1/archive_policy HTTP/1.1
-
-- name: update-archive-policy
-  request: |
-    PATCH /v1/archive_policy/{{ scenarios['create-archive-policy']['response'].json['name'] }} HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "definition": [
-        {
-          "granularity": "1s",
-          "timespan": "1 hour"
-        },
-        {
-          "points": 48,
-          "timespan": "1 day"
-        }
-      ]
-    }
-
-- name: create-archive-policy-to-delete
-  request: |
-    POST /v1/archive_policy HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "name": "some-archive-policy",
-      "back_window": 0,
-      "definition": [
-        {
-          "granularity": "1s",
-          "timespan": "1 hour"
-        },
-        {
-          "points": 48,
-          "timespan": "1 day"
-        }
-      ]
-    }
-
-- name: delete-archive-policy
-  request: DELETE /v1/archive_policy/{{ scenarios['create-archive-policy-to-delete']['response'].json['name'] }} HTTP/1.1
-
-- name: create-metric
-  request: |
-    POST /v1/metric HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "archive_policy_name": "high"
-    }
-
-- name: create-metric-2
-  request: |
-    POST /v1/metric HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "archive_policy_name": "low"
-    }
-
-- name: create-archive-policy-rule
-  request: |
-    POST /v1/archive_policy_rule HTTP/1.1
-    Content-Type: application/json
-
-    {
-      "name": "test_rule",
-      "metric_pattern": "disk.io.*",
-      "archive_policy_name": "low"
-    }
-
-- name: get-archive-policy-rule
-  request: GET /v1/archive_policy_rule/{{ scenarios['create-archive-policy-rule']['response'].json['name'] }} HTTP/1.1
-
-- name: list-archive-policy-rule
-  request: GET /v1/archive_policy_rule HTTP/1.1
-
-- name: create-archive-policy-rule-to-delete - request: | - POST /v1/archive_policy_rule HTTP/1.1 - Content-Type: application/json - - { - "name": "test_rule_delete", - "metric_pattern": "disk.io.*", - "archive_policy_name": "low" - } - -- name: delete-archive-policy-rule - request: DELETE /v1/archive_policy_rule/{{ scenarios['create-archive-policy-rule-to-delete']['response'].json['name'] }} HTTP/1.1 - - -- name: get-metric - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1 - -- name: list-metric - request: GET /v1/metric HTTP/1.1 - -- name: list-metric-pagination - request: GET /v1/metric?limit=100&sort=name:asc HTTP/1.1 - -- name: post-measures - request: | - POST /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1 - Content-Type: application/json - - [ - { - "timestamp": "2014-10-06T14:33:57", - "value": 43.1 - }, - { - "timestamp": "2014-10-06T14:34:12", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 2 - } - ] - -- name: post-measures-batch - request: | - POST /v1/batch/metrics/measures HTTP/1.1 - Content-Type: application/json - - { - "{{ scenarios['create-metric']['response'].json['id'] }}": - [ - { - "timestamp": "2014-10-06T14:34:12", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 2 - } - ], - "{{ scenarios['create-metric-2']['response'].json['id'] }}": - [ - { - "timestamp": "2014-10-06T16:12:12", - "value": 3 - }, - { - "timestamp": "2014-10-06T18:14:52", - "value": 4 - } - ] - } - -- name: search-value-in-metric - request: | - POST /v1/search/metric?metric_id={{ scenarios['create-metric']['response'].json['id'] }} HTTP/1.1 - Content-Type: application/json - - {"and": [{">=": [{"+": 23}, 50]}, {"!=": 55}]} - -- name: create-metric-a - request: | - POST /v1/metric HTTP/1.1 - Content-Type: application/json - - { - "archive_policy_name": "short" - } - -- name: post-measures-for-granularity-search - request: | - POST /v1/metric/{{ scenarios['create-metric-a']['response'].json['id'] }}/measures HTTP/1.1 - Content-Type: application/json - - [ - { - "timestamp": "2014-10-06T14:34:12", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:14", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:16", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:18", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:22", - "value": 12 - }, - { - "timestamp": "2014-10-06T14:34:24", - "value": 12 - } - ] - -- name: search-value-in-metrics-by-granularity - request: | - POST /v1/search/metric?metric_id={{ scenarios['create-metric-a']['response'].json['id'] }}&granularity=1second&granularity=1800s HTTP/1.1 - Content-Type: application/json - - {"=": 12} - -- name: get-measures - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures HTTP/1.1 - -- name: get-measures-from - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?start=2014-10-06T14:34 HTTP/1.1 - -- name: get-measures-max - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?aggregation=max HTTP/1.1 - -- name: get-measures-granularity - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?granularity=1 HTTP/1.1 - -- name: get-measures-refresh - request: GET /v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?refresh=true HTTP/1.1 - -- name: get-measures-resample - request: GET 
/v1/metric/{{ scenarios['create-metric']['response'].json['id'] }}/measures?resample=5&granularity=1 HTTP/1.1 - -- name: create-resource-generic - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "75C44741-CC60-4033-804E-2D3098C7D2E9", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D" - } - -- name: create-resource-with-new-metrics - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "AB68DA77-FA82-4E67-ABA9-270C5A98CBCB", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "metrics": {"temperature": {"archive_policy_name": "low"}} - } - -- name: create-resource-type-instance - request: | - POST /v1/resource_type HTTP/1.1 - Content-Type: application/json - - { - "name": "instance", - "attributes": { - "display_name": {"type": "string", "required": true}, - "flavor_id": {"type": "string", "required": true}, - "image_ref": {"type": "string", "required": true}, - "host": {"type": "string", "required": true}, - "server_group": {"type": "string", "required": false} - } - } - -- name: create-resource-instance - request: | - POST /v1/resource/instance HTTP/1.1 - Content-Type: application/json - - { - "id": "6868DA77-FA82-4E67-ABA9-270C5AE8CBCA", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "started_at": "2014-01-02 23:23:34", - "ended_at": "2014-01-04 10:00:12", - "flavor_id": "2", - "image_ref": "http://image", - "host": "compute1", - "display_name": "myvm", - "metrics": {} - } - -- name: list-resource-generic - request: GET /v1/resource/generic HTTP/1.1 - -- name: list-resource-instance - request: GET /v1/resource/instance HTTP/1.1 - -- name: list-resource-generic-details - request: GET /v1/resource/generic?details=true HTTP/1.1 - -- name: list-resource-generic-pagination - request: GET /v1/resource/generic?limit=2&sort=id:asc HTTP/1.1 - -- name: search-resource-for-user - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - - {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}} - -- name: search-resource-for-host-like - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - - {"like": {"host": "compute%"}} - -- name: search-resource-for-user-details - request: | - POST /v1/search/resource/generic?details=true HTTP/1.1 - Content-Type: application/json - - {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}} - -- name: search-resource-for-user-after-timestamp - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - - {"and": [ - {"=": {"user_id": "{{ scenarios['create-resource-instance']['response'].json['user_id'] }}"}}, - {">=": {"started_at": "2010-01-01"}} - ]} - -- name: search-resource-lifespan - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - - {">=": {"lifespan": "30 min"}} - -- name: get-resource-generic - request: GET /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1 - -- name: get-instance - request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1 - -- name: create-resource-instance-bis - request: | - POST /v1/resource/instance HTTP/1.1 - Content-Type: application/json - - { - "id": 
"AB0B5802-E79B-4C84-8998-9237F60D9CAE", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "flavor_id": "2", - "image_ref": "http://image", - "host": "compute1", - "display_name": "myvm", - "metrics": {} - } - -- name: patch-resource - request: | - PATCH /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1 - Content-Type: application/json - - {"host": "compute2"} - -- name: get-patched-instance-history - request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }}/history HTTP/1.1 - -- name: get-patched-instance - request: GET /v1/resource/instance/{{ scenarios['create-resource-instance']['response'].json['id'] }} HTTP/1.1 - - -- name: create-resource-type - request: | - POST /v1/resource_type HTTP/1.1 - Content-Type: application/json - - { - "name": "my_custom_type", - "attributes": { - "myid": {"type": "uuid"}, - "display_name": {"type": "string", "required": true}, - "prefix": {"type": "string", "required": false, "max_length": 8, "min_length": 3}, - "size": {"type": "number", "min": 5, "max": 32.8}, - "enabled": {"type": "bool", "required": false} - } - } - -- name: create-resource-type-2 - request: | - POST /v1/resource_type HTTP/1.1 - Content-Type: application/json - - {"name": "my_other_type"} - -- name: get-resource-type - request: GET /v1/resource_type/my_custom_type HTTP/1.1 - -- name: list-resource-type - request: GET /v1/resource_type HTTP/1.1 - -- name: patch-resource-type - request: | - PATCH /v1/resource_type/my_custom_type HTTP/1.1 - Content-Type: application/json-patch+json - - [ - { - "op": "add", - "path": "/attributes/awesome-stuff", - "value": {"type": "bool", "required": false} - }, - { - "op": "add", - "path": "/attributes/required-stuff", - "value": {"type": "bool", "required": true, "options": {"fill": true}} - }, - { - "op": "remove", - "path": "/attributes/prefix" - } - ] - - -- name: delete-resource-type - request: DELETE /v1/resource_type/my_custom_type HTTP/1.1 - -- name: search-resource-history - request: | - POST /v1/search/resource/instance?history=true HTTP/1.1 - Content-Type: application/json - - {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}} - -- name: search-resource-history-in-accept - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - Accept: application/json; history=true - - {"=": {"id": "{{ scenarios['create-resource-instance']['response'].json['id'] }}"}} - -- name: search-resource-history-partial - request: | - POST /v1/search/resource/instance HTTP/1.1 - Content-Type: application/json - Accept: application/json; history=true - - {"and": [ - {"=": {"host": "compute1"}}, - {">=": {"revision_start": "{{ scenarios['get-instance']['response'].json['revision_start'] }}"}}, - {"or": [{"<=": {"revision_end": "{{ scenarios['get-patched-instance']['response'].json['revision_start'] }}"}}, - {"=": {"revision_end": null}}]} - ]} - -- name: create-resource-instance-with-metrics - request: | - POST /v1/resource/instance HTTP/1.1 - Content-Type: application/json - - { - "id": "6F24EDD9-5A2F-4592-B708-FFBED821C5D2", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "flavor_id": "2", - "image_ref": "http://image", - "host": "compute1", - "display_name": "myvm2", - "server_group": "my_autoscaling_group", - "metrics": {"cpu.util": "{{ scenarios['create-metric']['response'].json['id'] 
}}"} - } - -- name: create-resource-instance-with-dynamic-metrics - request: | - POST /v1/resource/instance HTTP/1.1 - Content-Type: application/json - - { - "id": "15e9c872-7ca9-11e4-a2da-2fb4032dfc09", - "user_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "project_id": "BD3A1E52-1C62-44CB-BF04-660BD88CD74D", - "flavor_id": "2", - "image_ref": "http://image", - "host": "compute2", - "display_name": "myvm3", - "server_group": "my_autoscaling_group", - "metrics": {"cpu.util": {"archive_policy_name": "{{ scenarios['create-archive-policy']['response'].json['name'] }}"}} - } - -- name: post-measures-batch-named - request: | - POST /v1/batch/resources/metrics/measures HTTP/1.1 - Content-Type: application/json - - { - "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": { - "temperature": [ - { "timestamp": "2014-10-06T14:34:12", "value": 17 }, - { "timestamp": "2014-10-06T14:34:20", "value": 18 } - ] - }, - "{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}": { - "cpu.util": [ - { "timestamp": "2014-10-06T14:34:12", "value": 12 }, - { "timestamp": "2014-10-06T14:34:20", "value": 2 } - ] - }, - "{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}": { - "cpu.util": [ - { "timestamp": "2014-10-06T14:34:12", "value": 6 }, - { "timestamp": "2014-10-06T14:34:20", "value": 25 } - ] - } - } - -- name: post-measures-batch-named-create - request: | - POST /v1/batch/resources/metrics/measures?create_metrics=true HTTP/1.1 - Content-Type: application/json - - { - "{{ scenarios['create-resource-with-new-metrics']['response'].json['id'] }}": { - "disk.io.test": [ - { "timestamp": "2014-10-06T14:34:12", "value": 71 }, - { "timestamp": "2014-10-06T14:34:20", "value": 81 } - ] - } - } - -- name: delete-resource-generic - request: DELETE /v1/resource/generic/{{ scenarios['create-resource-generic']['response'].json['id'] }} HTTP/1.1 - -- name: create-resources-a - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AA19-BBE0-E1E2-2D3JDC7D289R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: create-resources-b - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AAEF-AA90-E1E2-2D3JDC7D289R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: create-resources-c - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AAEF-BCEF-E112-2D3JDC7D289R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: create-resources-d - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AAEF-BCEF-E112-2D15DC7D289R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: create-resources-e - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AAEF-BCEF-E112-2D3JDC30289R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - "project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: create-resources-f - request: | - POST /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "id": "340102AA-AAEF-BCEF-E112-2D15349D109R", - "user_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ", - 
"project_id": "BD3A1E52-KKKC-2123-BGLH-WWUUD88CD7WZ" - } - -- name: delete-resources-by-ids - request: | - DELETE /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - "in": { - "id": [ - "{{ scenarios['create-resources-a']['response'].json['id'] }}", - "{{ scenarios['create-resources-b']['response'].json['id'] }}", - "{{ scenarios['create-resources-c']['response'].json['id'] }}" - ] - } - } - -- name: delete-resources-by-time - request: | - DELETE /v1/resource/generic HTTP/1.1 - Content-Type: application/json - - { - ">=": {"started_at": "{{ scenarios['create-resources-f']['response'].json['started_at'] }}"} - } - - -- name: get-resource-named-metrics-measures - request: GET /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures?start=2014-10-06T14:34 HTTP/1.1 - -- name: post-resource-named-metrics-measures1 - request: | - POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric/cpu.util/measures HTTP/1.1 - Content-Type: application/json - - [ - { - "timestamp": "2014-10-06T14:33:57", - "value": 3.5 - }, - { - "timestamp": "2014-10-06T14:34:12", - "value": 20 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 9 - } - ] - -- name: post-resource-named-metrics-measures2 - request: | - POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['id'] }}/metric/cpu.util/measures HTTP/1.1 - Content-Type: application/json - - [ - { - "timestamp": "2014-10-06T14:33:57", - "value": 25.1 - }, - { - "timestamp": "2014-10-06T14:34:12", - "value": 4.5 - }, - { - "timestamp": "2014-10-06T14:34:20", - "value": 14.2 - } - ] - -- name: get-across-metrics-measures-by-attributes-lookup - request: | - POST /v1/aggregation/resource/instance/metric/cpu.util?start=2014-10-06T14:34&aggregation=mean HTTP/1.1 - Content-Type: application/json - - {"=": {"server_group": "my_autoscaling_group"}} - -- name: get-across-metrics-measures-by-attributes-lookup-groupby - request: | - POST /v1/aggregation/resource/instance/metric/cpu.util?groupby=host&groupby=flavor_id HTTP/1.1 - Content-Type: application/json - - {"=": {"server_group": "my_autoscaling_group"}} - -- name: get-across-metrics-measures-by-metric-ids - request: | - GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&start=2014-10-06T14:34&aggregation=mean HTTP/1.1 - -- name: get-across-metrics-measures-by-metric-ids-reaggregate - request: | - GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&aggregation=mean&reaggregation=min HTTP/1.1 - -- name: get-across-metrics-measures-by-metric-ids-fill - request: | - GET /v1/aggregation/metric?metric={{ scenarios['create-resource-instance-with-metrics']['response'].json['metrics']['cpu.util'] }}&metric={{ scenarios['create-resource-instance-with-dynamic-metrics']['response'].json['metrics']['cpu.util'] }}&fill=0&granularity=1 HTTP/1.1 - -- name: append-metrics-to-resource - request: | - POST /v1/resource/generic/{{ scenarios['create-resource-instance-with-metrics']['response'].json['id'] }}/metric HTTP/1.1 - Content-Type: application/json - - {"memory": 
{"archive_policy_name": "low"}} - -- name: get-capabilities - request: GET /v1/capabilities HTTP/1.1 - -- name: get-status - request: GET /v1/status HTTP/1.1 diff --git a/doc/source/running.rst b/doc/source/running.rst deleted file mode 100644 index 48c437cab..000000000 --- a/doc/source/running.rst +++ /dev/null @@ -1,246 +0,0 @@ -=============== -Running Gnocchi -=============== - -To run Gnocchi, simply run the HTTP server and metric daemon: - -:: - - gnocchi-api - gnocchi-metricd - - -Running API As A WSGI Application -================================= - -The Gnocchi API tier runs using WSGI. This means it can be run using `Apache -httpd`_ and `mod_wsgi`_, or other HTTP daemon such as `uwsgi`_. You should -configure the number of process and threads according to the number of CPU you -have, usually around 1.5 × number of CPU. If one server is not enough, you can -spawn any number of new API server to scale Gnocchi out, even on different -machines. - -The following uwsgi configuration file can be used:: - - [uwsgi] - http = localhost:8041 - # Set the correct path depending on your installation - wsgi-file = /usr/local/bin/gnocchi-api - master = true - die-on-term = true - threads = 32 - # Adjust based on the number of CPU - processes = 32 - enabled-threads = true - thunder-lock = true - plugins = python - buffer-size = 65535 - lazy-apps = true - -Once written to `/etc/gnocchi/uwsgi.ini`, it can be launched this way:: - - uwsgi /etc/gnocchi/uwsgi.ini - -.. _Apache httpd: http://httpd.apache.org/ -.. _mod_wsgi: https://modwsgi.readthedocs.org/ -.. _uwsgi: https://uwsgi-docs.readthedocs.org/ - -How to define archive policies -============================== - -In Gnocchi, the archive policy definitions are expressed in number of points. -If your archive policy defines a policy of 10 points with a granularity of 1 -second, the time series archive will keep up to 10 seconds, each representing -an aggregation over 1 second. This means the time series will at maximum retain -10 seconds of data (sometimes a bit more) between the more recent point and the -oldest point. That does not mean it will be 10 consecutive seconds: there might -be a gap if data is fed irregularly. - -There is no expiry of data relative to the current timestamp. - -Therefore, both the archive policy and the granularity entirely depends on your -use case. Depending on the usage of your data, you can define several archiving -policies. A typical low grained use case could be:: - - 3600 points with a granularity of 1 second = 1 hour - 1440 points with a granularity of 1 minute = 24 hours - 720 points with a granularity of 1 hour = 30 days - 365 points with a granularity of 1 day = 1 year - -This would represent 6125 points × 9 = 54 KiB per aggregation method. If -you use the 8 standard aggregation method, your metric will take up to 8 × 54 -KiB = 432 KiB of disk space. - -Be aware that the more definitions you set in an archive policy, the more CPU -it will consume. Therefore, creating an archive policy with 2 definitons (e.g. -1 second granularity for 1 day and 1 minute granularity for 1 month) may -consume twice CPU than just one definition (e.g. just 1 second granularity for -1 day). - -Default archive policies -======================== - -By default, 3 archive policies are created when calling `gnocchi-upgrade`: -*low*, *medium* and *high*. The name both describes the storage space and CPU -usage needs. They use `default_aggregation_methods` which is by default set to -*mean*, *min*, *max*, *sum*, *std*, *count*. 
-
-Default archive policies
-========================
-
-By default, 3 archive policies are created when calling `gnocchi-upgrade`:
-*low*, *medium* and *high*. Their names describe both the storage space and
-the CPU usage needs. They use `default_aggregation_methods`, which is by
-default set to *mean*, *min*, *max*, *sum*, *std*, *count*.
-
-A fourth archive policy named `bool` is also provided by default and is
-designed to store only boolean values (i.e. 0 and 1). It only stores one data
-point for each second (using the `last` aggregation method), with a one year
-retention period. The maximum optimistic storage size is estimated based on
-the assumption that no values other than 0 and 1 are sent as measures. If
-other values are sent, the maximum pessimistic storage size is taken into
-account.
-
-- low
-
-  * 5 minutes granularity over 30 days
-  * aggregation methods used: `default_aggregation_methods`
-  * maximum estimated size per metric: 406 KiB
-
-- medium
-
-  * 1 minute granularity over 7 days
-  * 1 hour granularity over 365 days
-  * aggregation methods used: `default_aggregation_methods`
-  * maximum estimated size per metric: 887 KiB
-
-- high
-
-  * 1 second granularity over 1 hour
-  * 1 minute granularity over 1 week
-  * 1 hour granularity over 1 year
-  * aggregation methods used: `default_aggregation_methods`
-  * maximum estimated size per metric: 1 057 KiB
-
-- bool
-
-  * 1 second granularity over 1 year
-  * aggregation methods used: *last*
-  * maximum optimistic size per metric: 1 539 KiB
-  * maximum pessimistic size per metric: 277 172 KiB
-
-How to plan for Gnocchi’s storage
-=================================
-
-Gnocchi uses a custom file format based on its library *Carbonara*. In
-Gnocchi, a time series is a collection of points, where a point is a given
-measure, or sample, in the lifespan of a time series. The storage format is
-compressed using various techniques, therefore the size of a time series can
-be estimated based on its **worst** case scenario with the following
-formula::
-
-    number of points × 8 bytes = size in bytes
-
-The number of points you want to keep is usually determined by the following
-formula::
-
-    number of points = timespan ÷ granularity
-
-For example, if you want to keep a year of data with a one minute resolution::
-
-    number of points = (365 days × 24 hours × 60 minutes) ÷ 1 minute
-    number of points = 525 600
-
-Then::
-
-    size in bytes = 525 600 points × 8 bytes = 4 204 800 bytes = 4 106 KiB
-
-This is just for a single aggregated time series. If your archive policy uses
-the 6 default aggregation methods (mean, min, max, sum, std, count) with the
-same "one year, one minute aggregations" resolution, the space used will go
-up to a maximum of 6 × 4.1 MiB = 24.6 MiB.
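-
-The worst-case arithmetic above is easy to script. The following sketch is
-plain Python implementing the two formulas exactly as stated; the 8 bytes
-per point figure is the worst-case estimate quoted above::
-
-    # Worst-case storage estimate for one aggregated time series.
-    def number_of_points(timespan_seconds, granularity_seconds):
-        # number of points = timespan ÷ granularity
-        return timespan_seconds // granularity_seconds
-
-    def size_in_bytes(timespan_seconds, granularity_seconds):
-        # size in bytes = number of points × 8 bytes
-        return number_of_points(timespan_seconds, granularity_seconds) * 8
-
-    one_year = 365 * 24 * 60 * 60  # in seconds
-    one_minute = 60
-    print(number_of_points(one_year, one_minute))  # 525600
-    print(size_in_bytes(one_year, one_minute))     # 4204800 bytes, ~4106 KiB
-    # With the 6 default aggregation methods:
-    print(6 * size_in_bytes(one_year, one_minute) / 1024 ** 2)  # ~24 MiB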
-
-How many metricd workers do we need to run
-==========================================
-
-By default, the `gnocchi-metricd` daemon spawns workers across all your CPUs
-in order to maximize CPU utilisation when computing metric aggregation. You
-can use the `gnocchi status` command to query the HTTP API and get the
-cluster status for metric processing. It’ll show you the number of metrics
-to process, known as the processing backlog for `gnocchi-metricd`. As long as
-this backlog is not continuously increasing, it means that `gnocchi-metricd`
-is able to cope with the amount of measures that are being sent. If the
-number of measures to process is continuously increasing, you will need to
-(maybe temporarily) increase the number of `gnocchi-metricd` daemons. You can
-run any number of metricd daemons on any number of servers.
-
-How to scale measure processing
-===============================
-
-Measurement data pushed to Gnocchi is divided into sacks for better
-distribution. Incoming metrics are pushed to specific sacks and each sack is
-assigned to one or more `gnocchi-metricd` daemons for processing. The number
-of sacks is controlled by the `sacks` option under the `[incoming]` section.
-
-How many sacks do we need to create
------------------------------------
-
-The number of sacks enabled should be set based on the number of active
-metrics the system will capture. Additionally, the number of sacks should be
-higher than the total number of active `gnocchi-metricd` workers.
-
-In general, use the following equation to determine the appropriate `sacks`
-value to set::
-
-    sacks value = number of **active** metrics / 300
-
-If the estimated number of metrics is the absolute maximum, divide the value
-by 500 instead. If the estimated number of active metrics is conservative and
-expected to grow, divide the value by 100 instead to accommodate growth.
-
-How do we change sack size
---------------------------
-
-In the event your system grows to capture significantly more metrics than
-originally anticipated, the number of sacks can be changed to maintain good
-distribution. To avoid any loss of data when modifying the `sacks` option, it
-should be changed in the following order::
-
-    1. Stop all input services (api, statsd)
-
-    2. Stop all metricd services once backlog is cleared
-
-    3. Run gnocchi-change-sack-size to set new sack size. Note
-       that sack value can only be changed if the backlog is empty.
-
-    4. Restart all gnocchi services (api, statsd, metricd) with new
-       configuration
-
-Alternatively, to minimise API downtime::
-
-    1. Run gnocchi-upgrade but use a new incoming storage target such as a
-       new ceph pool, file path, etc... Additionally, set aggregate storage
-       to a new target as well.
-
-    2. Run gnocchi-change-sack-size against new target
-
-    3. Stop all input services (api, statsd)
-
-    4. Restart all input services but target newly created incoming storage
-
-    5. When done clearing backlog from original incoming storage, switch all
-       metricd daemons to target new incoming storage but maintain original
-       aggregate storage.
-
-How to monitor Gnocchi
-======================
-
-The `/v1/status` endpoint of the HTTP API returns various information, such
-as the number of measures to process (measures backlog), which you can easily
-monitor (see `How many metricd workers do we need to run`_). Making sure that
-the HTTP server and the `gnocchi-metricd` daemon are running and are not
-writing anything alarming in their logs is a sign of good health of the
-overall system.
-
-Total measures for backlog status may not accurately reflect the number of
-points to be processed when measures are submitted via batch.
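-
-A quick way to check the backlog programmatically is to poll that endpoint.
-Below is a sketch with the Python `requests` library; the endpoint URL, the
-lack of authentication and the exact layout of the returned JSON (a `storage`
-section with a `summary` of metrics and measures) are assumptions to verify
-against your deployment::
-
-    import requests
-
-    r = requests.get("http://localhost:8041/v1/status")
-    r.raise_for_status()
-    # Assumed response layout; check what your Gnocchi version returns.
-    summary = r.json()["storage"]["summary"]
-    # Backlog of measures not yet processed by gnocchi-metricd.
-    print(summary["measures"], "measures across", summary["metrics"], "metrics")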
-
-How to backup and restore Gnocchi
-=================================
-
-In order to be able to recover from an unfortunate event, you need to back up
-both the index and the storage. That means creating a database dump
-(PostgreSQL or MySQL) and taking snapshots or copies of your data storage
-(Ceph, S3, Swift or your file system). The procedure to restore is no more
-complicated than the initial deployment: restore your index and storage
-backups, reinstall Gnocchi if necessary, and restart it.
diff --git a/doc/source/statsd.rst b/doc/source/statsd.rst
deleted file mode 100644
index 88405b8a8..000000000
--- a/doc/source/statsd.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-===================
-Statsd Daemon Usage
-===================
-
-What Is It?
-===========
-`Statsd`_ is a network daemon that listens for statistics sent over the
-network using TCP or UDP, and then sends aggregates to another backend.
-
-Gnocchi provides a daemon that is compatible with the statsd protocol and can
-listen to metrics sent over the network, named `gnocchi-statsd`.
-
-.. _`Statsd`: https://github.com/etsy/statsd/
-
-How Does It Work?
-=================
-In order to enable statsd support in Gnocchi, you need to configure the
-`[statsd]` option group in the configuration file. You need to provide a
-resource ID that will be used as the main generic resource where all the
-metrics will be attached, a user and project id that will be associated with
-the resource and metrics, and an archive policy name that will be used to
-create the metrics.
-
-All the metrics will be created dynamically as the metrics are sent to
-`gnocchi-statsd`, and attached with the provided name to the resource ID you
-configured.
-
-The `gnocchi-statsd` daemon may be scaled, but trade-offs have to be made due
-to the nature of the statsd protocol. That means that if you use metrics of
-type `counter`_ or sampling (`c` in the protocol), you should always send
-those metrics to the same daemon – or not use them at all. The other
-supported types (`timing`_ and `gauges`_) do not suffer from this limitation,
-but be aware that you might have more measures than expected if you send the
-same metric to different `gnocchi-statsd` servers, as neither their caches
-nor their flush delays are synchronized.
-
-.. _`counter`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#counting
-.. _`timing`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing
-.. _`gauges`: https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges
-
-.. note ::
-   The statsd protocol support is incomplete: relative gauge values with +/-
-   and sets are not supported yet.
diff --git a/gnocchi/__init__.py b/gnocchi/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/gnocchi/aggregates/__init__.py b/gnocchi/aggregates/__init__.py
deleted file mode 100644
index 4d54f4704..000000000
--- a/gnocchi/aggregates/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- encoding: utf-8 -*-
-#
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import abc - -import six - -from gnocchi import exceptions - - -class CustomAggFailure(Exception): - """Error raised when custom aggregation functions fail for any reason.""" - - def __init__(self, msg): - self.msg = msg - super(CustomAggFailure, self).__init__(msg) - - -@six.add_metaclass(abc.ABCMeta) -class CustomAggregator(object): - - @abc.abstractmethod - def compute(self, storage_obj, metric, start, stop, **param): - """Returns list of (timestamp, window, aggregate value) tuples. - - :param storage_obj: storage object for retrieving the data - :param metric: metric - :param start: start timestamp - :param stop: stop timestamp - :param **param: parameters are window and optionally center. - 'window' is the granularity over which to compute the moving - aggregate. - 'center=True' returns the aggregated data indexed by the central - time in the sampling window, 'False' (default) indexes aggregates - by the oldest time in the window. center is not supported for EWMA. - - """ - raise exceptions.NotImplementedError diff --git a/gnocchi/aggregates/moving_stats.py b/gnocchi/aggregates/moving_stats.py deleted file mode 100644 index b0ce3b405..000000000 --- a/gnocchi/aggregates/moving_stats.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime - -import numpy -import pandas -import six - -from gnocchi import aggregates -from gnocchi import utils - - -class MovingAverage(aggregates.CustomAggregator): - - @staticmethod - def check_window_valid(window): - """Takes in the window parameter string, reformats as a float.""" - if window is None: - msg = 'Moving aggregate must have window specified.' - raise aggregates.CustomAggFailure(msg) - try: - return utils.to_timespan(six.text_type(window)).total_seconds() - except Exception: - raise aggregates.CustomAggFailure('Invalid value for window') - - @staticmethod - def retrieve_data(storage_obj, metric, start, stop, window): - """Retrieves finest-res data available from storage.""" - all_data = storage_obj.get_measures(metric, start, stop) - - try: - min_grain = min(set([row[1] for row in all_data if row[1] == 0 - or window % row[1] == 0])) - except Exception: - msg = ("No data available that is either full-res or " - "of a granularity that factors into the window size " - "you specified.") - raise aggregates.CustomAggFailure(msg) - - return min_grain, pandas.Series([r[2] for r in all_data - if r[1] == min_grain], - [r[0] for r in all_data - if r[1] == min_grain]) - - @staticmethod - def aggregate_data(data, func, window, min_grain, center=False, - min_size=1): - """Calculates moving func of data with sampling width of window. - - :param data: Series of timestamp, value pairs - :param func: the function to use when aggregating - :param window: (float) range of data to use in each aggregation. - :param min_grain: granularity of the data being passed in. 
- :param center: whether to index the aggregated values by the first - timestamp of the values picked up by the window or by the central - timestamp. - :param min_size: if the number of points in the window is less than - min_size, the aggregate is not computed and nan is returned for - that iteration. - """ - - if center: - center = utils.strtobool(center) - - def moving_window(x): - msec = datetime.timedelta(milliseconds=1) - zero = datetime.timedelta(seconds=0) - half_span = datetime.timedelta(seconds=window / 2) - start = utils.normalize_time(data.index[0]) - stop = utils.normalize_time( - data.index[-1] + datetime.timedelta(seconds=min_grain)) - # min_grain addition necessary since each bin of rolled-up data - # is indexed by leftmost timestamp of bin. - - left = half_span if center else zero - right = 2 * half_span - left - msec - # msec subtraction is so we don't include right endpoint in slice. - - x = utils.normalize_time(x) - - if x - left >= start and x + right <= stop: - dslice = data[x - left: x + right] - - if center and dslice.size % 2 == 0: - return func([func(data[x - msec - left: x - msec + right]), - func(data[x + msec - left: x + msec + right]) - ]) - - # (NOTE) atmalagon: the msec shift here is so that we have two - # consecutive windows; one centered at time x - msec, - # and one centered at time x + msec. We then average the - # aggregates from the two windows; this result is centered - # at time x. Doing this double average is a way to return a - # centered average indexed by a timestamp that existed in - # the input data (which wouldn't be the case for an even number - # of points if we did only one centered average). - - else: - return numpy.nan - if dslice.size < min_size: - return numpy.nan - return func(dslice) - try: - result = pandas.Series(data.index).apply(moving_window) - - # change from integer index to timestamp index - result.index = data.index - - return [(t, window, r) for t, r - in six.iteritems(result[~result.isnull()])] - except Exception as e: - raise aggregates.CustomAggFailure(str(e)) - - def compute(self, storage_obj, metric, start, stop, window=None, - center=False): - """Returns list of (timestamp, window, aggregated value) tuples. - - :param storage_obj: a call is placed to the storage object to retrieve - the stored data. - :param metric: the metric - :param start: start timestamp - :param stop: stop timestamp - :param window: format string specifying the size over which to - aggregate the retrieved data - :param center: how to index the aggregated data (central timestamp or - leftmost timestamp) - """ - window = self.check_window_valid(window) - min_grain, data = self.retrieve_data(storage_obj, metric, start, - stop, window) - return self.aggregate_data(data, numpy.mean, window, min_grain, center, - min_size=1) diff --git a/gnocchi/archive_policy.py b/gnocchi/archive_policy.py deleted file mode 100644 index 54c64cc28..000000000 --- a/gnocchi/archive_policy.py +++ /dev/null @@ -1,247 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright (c) 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -import collections -import datetime -import operator - -from oslo_config import cfg -from oslo_config import types -import six - - -class ArchivePolicy(object): - - DEFAULT_AGGREGATION_METHODS = () - - # TODO(eglynn): figure out how to accommodate multi-valued aggregation - # methods, where there is no longer just a single aggregate - # value to be stored per-period (e.g. ohlc) - VALID_AGGREGATION_METHODS = set( - ('mean', 'sum', 'last', 'max', 'min', - 'std', 'median', 'first', 'count')).union( - set((str(i) + 'pct' for i in six.moves.range(1, 100)))) - - # Set that contains all the above values + their minus equivalent (-mean) - # and the "*" entry. - VALID_AGGREGATION_METHODS_VALUES = VALID_AGGREGATION_METHODS.union( - set(('*',)), - set(map(lambda s: "-" + s, - VALID_AGGREGATION_METHODS)), - set(map(lambda s: "+" + s, - VALID_AGGREGATION_METHODS))) - - def __init__(self, name, back_window, definition, - aggregation_methods=None): - self.name = name - self.back_window = back_window - self.definition = [] - for d in definition: - if isinstance(d, ArchivePolicyItem): - self.definition.append(d) - elif isinstance(d, dict): - self.definition.append(ArchivePolicyItem(**d)) - elif len(d) == 2: - self.definition.append( - ArchivePolicyItem(points=d[0], granularity=d[1])) - else: - raise ValueError( - "Unable to understand policy definition %s" % d) - - duplicate_granularities = [ - granularity - for granularity, count in collections.Counter( - d.granularity for d in self.definition).items() - if count > 1 - ] - if duplicate_granularities: - raise ValueError( - "More than one archive policy " - "uses granularity `%s'" - % duplicate_granularities[0] - ) - - if aggregation_methods is None: - self.aggregation_methods = self.DEFAULT_AGGREGATION_METHODS - else: - self.aggregation_methods = aggregation_methods - - @property - def aggregation_methods(self): - if '*' in self._aggregation_methods: - agg_methods = self.VALID_AGGREGATION_METHODS.copy() - elif all(map(lambda s: s.startswith('-') or s.startswith('+'), - self._aggregation_methods)): - agg_methods = set(self.DEFAULT_AGGREGATION_METHODS) - else: - agg_methods = set(self._aggregation_methods) - - for entry in self._aggregation_methods: - if entry: - if entry[0] == '-': - agg_methods -= set((entry[1:],)) - elif entry[0] == '+': - agg_methods.add(entry[1:]) - - return agg_methods - - @aggregation_methods.setter - def aggregation_methods(self, value): - value = set(value) - rest = value - self.VALID_AGGREGATION_METHODS_VALUES - if rest: - raise ValueError("Invalid value for aggregation_methods: %s" % - rest) - self._aggregation_methods = value - - @classmethod - def from_dict(cls, d): - return cls(d['name'], - d['back_window'], - d['definition'], - d.get('aggregation_methods')) - - def __eq__(self, other): - return (isinstance(other, ArchivePolicy) - and self.name == other.name - and self.back_window == other.back_window - and self.definition == other.definition - and self.aggregation_methods == other.aggregation_methods) - - def jsonify(self): - return { - "name": self.name, - "back_window": self.back_window, - "definition": self.definition, - "aggregation_methods": self.aggregation_methods, - } - - @property - def max_block_size(self): - # The biggest block size is the coarse grained archive definition - return sorted(self.definition, - key=operator.attrgetter("granularity"))[-1].granularity - - -OPTS = [ - cfg.ListOpt( - 
'default_aggregation_methods', - item_type=types.String( - choices=ArchivePolicy.VALID_AGGREGATION_METHODS), - default=['mean', 'min', 'max', 'sum', 'std', 'count'], - help='Default aggregation methods to use in created archive policies'), -] - - -class ArchivePolicyItem(dict): - def __init__(self, granularity=None, points=None, timespan=None): - if (granularity is not None - and points is not None - and timespan is not None): - if timespan != granularity * points: - raise ValueError( - u"timespan ≠ granularity × points") - - if granularity is not None and granularity <= 0: - raise ValueError("Granularity should be > 0") - - if points is not None and points <= 0: - raise ValueError("Number of points should be > 0") - - if granularity is None: - if points is None or timespan is None: - raise ValueError( - "At least two of granularity/points/timespan " - "must be provided") - granularity = round(timespan / float(points)) - else: - granularity = float(granularity) - - if points is None: - if timespan is None: - self['timespan'] = None - else: - points = int(timespan / granularity) - self['timespan'] = granularity * points - else: - points = int(points) - self['timespan'] = granularity * points - - self['points'] = points - self['granularity'] = granularity - - @property - def granularity(self): - return self['granularity'] - - @property - def points(self): - return self['points'] - - @property - def timespan(self): - return self['timespan'] - - def jsonify(self): - """Return a dict representation with human readable values.""" - return { - 'timespan': six.text_type( - datetime.timedelta(seconds=self.timespan)) - if self.timespan is not None - else None, - 'granularity': six.text_type( - datetime.timedelta(seconds=self.granularity)), - 'points': self.points, - } - - -DEFAULT_ARCHIVE_POLICIES = { - 'bool': ArchivePolicy( - "bool", 3600, [ - # 1 second resolution for 365 days - ArchivePolicyItem(granularity=1, - timespan=365 * 24 * 60 * 60), - ], - aggregation_methods=("last",), - ), - 'low': ArchivePolicy( - "low", 0, [ - # 5 minutes resolution for 30 days - ArchivePolicyItem(granularity=300, - timespan=30 * 24 * 60 * 60), - ], - ), - 'medium': ArchivePolicy( - "medium", 0, [ - # 1 minute resolution for 7 days - ArchivePolicyItem(granularity=60, - timespan=7 * 24 * 60 * 60), - # 1 hour resolution for 365 days - ArchivePolicyItem(granularity=3600, - timespan=365 * 24 * 60 * 60), - ], - ), - 'high': ArchivePolicy( - "high", 0, [ - # 1 second resolution for an hour - ArchivePolicyItem(granularity=1, points=3600), - # 1 minute resolution for a week - ArchivePolicyItem(granularity=60, points=60 * 24 * 7), - # 1 hour resolution for a year - ArchivePolicyItem(granularity=3600, points=365 * 24), - ], - ), -} diff --git a/gnocchi/carbonara.py b/gnocchi/carbonara.py deleted file mode 100644 index 4716f41a8..000000000 --- a/gnocchi/carbonara.py +++ /dev/null @@ -1,980 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Time series data manipulation, better with pancetta."""
-
-import datetime
-import functools
-import logging
-import math
-import numbers
-import random
-import re
-import struct
-import time
-
-import lz4.block
-import numpy
-import numpy.lib.recfunctions
-import pandas
-from scipy import ndimage
-import six
-
-# NOTE(sileht): pandas relies on time.strptime()
-# and often triggers http://bugs.python.org/issue7980
-# due to our heavy use of threads; this is the workaround
-# to ensure the module is correctly loaded before we really use it.
-time.strptime("2016-02-19", "%Y-%m-%d")
-
-LOG = logging.getLogger(__name__)
-
-
-class NoDeloreanAvailable(Exception):
-    """Error raised when trying to insert a value that is too old."""
-
-    def __init__(self, first_timestamp, bad_timestamp):
-        self.first_timestamp = first_timestamp
-        self.bad_timestamp = bad_timestamp
-        super(NoDeloreanAvailable, self).__init__(
-            "%s is before %s" % (bad_timestamp, first_timestamp))
-
-
-class BeforeEpochError(Exception):
-    """Error raised when a timestamp before Epoch is used."""
-
-    def __init__(self, timestamp):
-        self.timestamp = timestamp
-        super(BeforeEpochError, self).__init__(
-            "%s is before Epoch" % timestamp)
-
-
-class UnAggregableTimeseries(Exception):
-    """Error raised when timeseries cannot be aggregated."""
-    def __init__(self, reason):
-        self.reason = reason
-        super(UnAggregableTimeseries, self).__init__(reason)
-
-
-class UnknownAggregationMethod(Exception):
-    """Error raised when the aggregation method is unknown."""
-    def __init__(self, agg):
-        self.aggregation_method = agg
-        super(UnknownAggregationMethod, self).__init__(
-            "Unknown aggregation method `%s'" % agg)
-
-
-class InvalidData(ValueError):
-    """Error raised when data are corrupted."""
-    def __init__(self):
-        super(InvalidData, self).__init__("Unable to unpack, invalid data")
-
-
-def round_timestamp(ts, freq):
-    return pandas.Timestamp(
-        (pandas.Timestamp(ts).value // freq) * freq)
-
-
-class GroupedTimeSeries(object):
-    def __init__(self, ts, granularity):
-        # NOTE(sileht): The whole class assumes ts is ordered and doesn't
-        # have duplicate timestamps; it uses numpy.unique, which returns a
-        # sorted array, but we always assume the order to be the same as
-        # the input.
-        freq = granularity * 10e8
-        self._ts = ts
-        self.indexes = (numpy.array(ts.index, numpy.float) // freq) * freq
-        self.tstamps, self.counts = numpy.unique(self.indexes,
-                                                 return_counts=True)
-
-    def mean(self):
-        return self._scipy_aggregate(ndimage.mean)
-
-    def sum(self):
-        return self._scipy_aggregate(ndimage.sum)
-
-    def min(self):
-        return self._scipy_aggregate(ndimage.minimum)
-
-    def max(self):
-        return self._scipy_aggregate(ndimage.maximum)
-
-    def median(self):
-        return self._scipy_aggregate(ndimage.median)
-
-    def std(self):
-        # NOTE(sileht): ndimage.standard_deviation is really more performant
-        # but it uses ddof=0; to get the same result as pandas we have to
-        # use ddof=1. If one day scipy allows passing ddof, this should be
-        # changed.
- return self._scipy_aggregate(ndimage.labeled_comprehension, - remove_unique=True, - func=functools.partial(numpy.std, ddof=1), - out_dtype='float64', - default=None) - - def _count(self): - timestamps = self.tstamps.astype('datetime64[ns]', copy=False) - return (self.counts, timestamps) - - def count(self): - return pandas.Series(*self._count()) - - def last(self): - counts, timestamps = self._count() - cumcounts = numpy.cumsum(counts) - 1 - values = self._ts.values[cumcounts] - return pandas.Series(values, pandas.to_datetime(timestamps)) - - def first(self): - counts, timestamps = self._count() - counts = numpy.insert(counts[:-1], 0, 0) - cumcounts = numpy.cumsum(counts) - values = self._ts.values[cumcounts] - return pandas.Series(values, pandas.to_datetime(timestamps)) - - def quantile(self, q): - return self._scipy_aggregate(ndimage.labeled_comprehension, - func=functools.partial( - numpy.percentile, - q=q, - ), - out_dtype='float64', - default=None) - - def _scipy_aggregate(self, method, remove_unique=False, *args, **kwargs): - if remove_unique: - tstamps = self.tstamps[self.counts > 1] - else: - tstamps = self.tstamps - - if len(tstamps) == 0: - return pandas.Series() - - values = method(self._ts.values, self.indexes, tstamps, - *args, **kwargs) - timestamps = tstamps.astype('datetime64[ns]', copy=False) - return pandas.Series(values, pandas.to_datetime(timestamps)) - - -class TimeSerie(object): - """A representation of series of a timestamp with a value. - - Duplicate timestamps are not allowed and will be filtered to use the - last in the group when the TimeSerie is created or extended. - """ - - def __init__(self, ts=None): - if ts is None: - ts = pandas.Series() - self.ts = ts - - @staticmethod - def clean_ts(ts): - if ts.index.has_duplicates: - ts = ts[~ts.index.duplicated(keep='last')] - if not ts.index.is_monotonic: - ts = ts.sort_index() - return ts - - @classmethod - def from_data(cls, timestamps=None, values=None, clean=False): - ts = pandas.Series(values, timestamps) - if clean: - # For format v2 - ts = cls.clean_ts(ts) - return cls(ts) - - @classmethod - def from_tuples(cls, timestamps_values): - return cls.from_data(*zip(*timestamps_values)) - - def __eq__(self, other): - return (isinstance(other, TimeSerie) - and self.ts.all() == other.ts.all()) - - def __getitem__(self, key): - return self.ts[key] - - def set_values(self, values): - t = pandas.Series(*reversed(list(zip(*values)))) - self.ts = self.clean_ts(t).combine_first(self.ts) - - def __len__(self): - return len(self.ts) - - @staticmethod - def _timestamps_and_values_from_dict(values): - timestamps = numpy.array(list(values.keys()), dtype='datetime64[ns]') - timestamps = pandas.to_datetime(timestamps) - v = list(values.values()) - if v: - return timestamps, v - return (), () - - @staticmethod - def _to_offset(value): - if isinstance(value, numbers.Real): - return pandas.tseries.offsets.Nano(value * 10e8) - return pandas.tseries.frequencies.to_offset(value) - - @property - def first(self): - try: - return self.ts.index[0] - except IndexError: - return - - @property - def last(self): - try: - return self.ts.index[-1] - except IndexError: - return - - def group_serie(self, granularity, start=0): - # NOTE(jd) Our whole serialization system is based on Epoch, and we - # store unsigned integer, so we can't store anything before Epoch. - # Sorry! 
- if self.ts.index[0].value < 0: - raise BeforeEpochError(self.ts.index[0]) - - return GroupedTimeSeries(self.ts[start:], granularity) - - @staticmethod - def _compress(payload): - # FIXME(jd) lz4 > 0.9.2 returns bytearray instead of bytes. But Cradox - # does not accept bytearray but only bytes, so make sure that we have a - # byte type returned. - return memoryview(lz4.block.compress(payload)).tobytes() - - -class BoundTimeSerie(TimeSerie): - def __init__(self, ts=None, block_size=None, back_window=0): - """A time serie that is limited in size. - - Used to represent the full-resolution buffer of incoming raw - datapoints associated with a metric. - - The maximum size of this time serie is expressed in a number of block - size, called the back window. - When the timeserie is truncated, a whole block is removed. - - You cannot set a value using a timestamp that is prior to the last - timestamp minus this number of blocks. By default, a back window of 0 - does not allow you to go back in time prior to the current block being - used. - - """ - super(BoundTimeSerie, self).__init__(ts) - self.block_size = self._to_offset(block_size) - self.back_window = back_window - self._truncate() - - @classmethod - def from_data(cls, timestamps=None, values=None, - block_size=None, back_window=0): - return cls(pandas.Series(values, timestamps), - block_size=block_size, back_window=back_window) - - def __eq__(self, other): - return (isinstance(other, BoundTimeSerie) - and super(BoundTimeSerie, self).__eq__(other) - and self.block_size == other.block_size - and self.back_window == other.back_window) - - def set_values(self, values, before_truncate_callback=None, - ignore_too_old_timestamps=False): - # NOTE: values must be sorted when passed in. - if self.block_size is not None and not self.ts.empty: - first_block_timestamp = self.first_block_timestamp() - if ignore_too_old_timestamps: - for index, (timestamp, value) in enumerate(values): - if timestamp >= first_block_timestamp: - values = values[index:] - break - else: - values = [] - else: - # Check that the smallest timestamp does not go too much back - # in time. 
- smallest_timestamp = values[0][0] - if smallest_timestamp < first_block_timestamp: - raise NoDeloreanAvailable(first_block_timestamp, - smallest_timestamp) - super(BoundTimeSerie, self).set_values(values) - if before_truncate_callback: - before_truncate_callback(self) - self._truncate() - - _SERIALIZATION_TIMESTAMP_VALUE_LEN = struct.calcsize("" % (self.__class__.__name__, - repr(self.key), - self._carbonara_sampling) - - -class AggregatedTimeSerie(TimeSerie): - - _AGG_METHOD_PCT_RE = re.compile(r"([1-9][0-9]?)pct") - - PADDED_SERIAL_LEN = struct.calcsize("" % ( - self.__class__.__name__, - id(self), - self.sampling, - self.max_size, - self.aggregation_method, - ) - - @staticmethod - def is_compressed(serialized_data): - """Check whatever the data was serialized with compression.""" - return six.indexbytes(serialized_data, 0) == ord("c") - - @classmethod - def unserialize(cls, data, start, agg_method, sampling): - x, y = [], [] - - start = float(start) - if data: - if cls.is_compressed(data): - # Compressed format - uncompressed = lz4.block.decompress( - memoryview(data)[1:].tobytes()) - nb_points = len(uncompressed) // cls.COMPRESSED_SERIAL_LEN - - timestamps_raw = uncompressed[ - :nb_points*cls.COMPRESSED_TIMESPAMP_LEN] - try: - y = numpy.frombuffer(timestamps_raw, dtype=' 0 and - (right_boundary_ts == left_boundary_ts or - (right_boundary_ts is None - and maybe_next_timestamp_is_left_boundary))): - LOG.debug("We didn't find points that overlap in those " - "timeseries. " - "right_boundary_ts=%(right_boundary_ts)s, " - "left_boundary_ts=%(left_boundary_ts)s, " - "groups=%(groups)s", { - 'right_boundary_ts': right_boundary_ts, - 'left_boundary_ts': left_boundary_ts, - 'groups': list(grouped) - }) - raise UnAggregableTimeseries('No overlap') - - # NOTE(sileht): this call the aggregation method on already - # aggregated values, for some kind of aggregation this can - # result can looks weird, but this is the best we can do - # because we don't have anymore the raw datapoints in those case. - # FIXME(sileht): so should we bailout is case of stddev, percentile - # and median? - agg_timeserie = getattr(grouped, aggregation)() - agg_timeserie = agg_timeserie.dropna().reset_index() - - if from_timestamp is None and left_boundary_ts: - agg_timeserie = agg_timeserie[ - agg_timeserie['timestamp'] >= left_boundary_ts] - if to_timestamp is None and right_boundary_ts: - agg_timeserie = agg_timeserie[ - agg_timeserie['timestamp'] <= right_boundary_ts] - - points = (agg_timeserie.sort_values(by=['granularity', 'timestamp'], - ascending=[0, 1]).itertuples()) - return [(timestamp, granularity, value) - for __, timestamp, granularity, value in points] - - -if __name__ == '__main__': - import sys - args = sys.argv[1:] - if not args or "--boundtimeserie" in args: - BoundTimeSerie.benchmark() - if not args or "--aggregatedtimeserie" in args: - AggregatedTimeSerie.benchmark() diff --git a/gnocchi/cli.py b/gnocchi/cli.py deleted file mode 100644 index 06e1fbbc5..000000000 --- a/gnocchi/cli.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright (c) 2013 Mirantis Inc. -# Copyright (c) 2015-2017 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
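A sketch of the serialization convention read by AggregatedTimeSerie.unserialize() above: a compressed payload carries a single "c" marker byte followed by an lz4 block. The payload here is synthetic.

    import lz4.block
    import six

    raw = b"\x01\x02\x03\x04" * 8
    serialized = b"c" + lz4.block.compress(raw)

    def is_compressed(data):
        # Same check as AggregatedTimeSerie.is_compressed(), working on
        # both Python 2 and 3 thanks to six.indexbytes().
        return six.indexbytes(data, 0) == ord("c")

    if is_compressed(serialized):
        body = memoryview(serialized)[1:].tobytes()  # skip the marker byte
        assert lz4.block.decompress(body) == raw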
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import sys -import threading -import time - -import cotyledon -from cotyledon import oslo_config_glue -from futurist import periodics -from oslo_config import cfg -from oslo_log import log -import six -import tenacity -import tooz - -from gnocchi import archive_policy -from gnocchi import genconfig -from gnocchi import indexer -from gnocchi import service -from gnocchi import statsd as statsd_service -from gnocchi import storage -from gnocchi.storage import incoming -from gnocchi import utils - - -LOG = log.getLogger(__name__) - - -def config_generator(): - return genconfig.prehook(None, sys.argv[1:]) - - -def upgrade(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.BoolOpt("skip-index", default=False, - help="Skip index upgrade."), - cfg.BoolOpt("skip-storage", default=False, - help="Skip storage upgrade."), - cfg.BoolOpt("skip-archive-policies-creation", default=False, - help="Skip default archive policies creation."), - cfg.IntOpt("num-storage-sacks", default=128, - help="Initial number of storage sacks to create."), - - ]) - conf = service.prepare_service(conf=conf) - index = indexer.get_driver(conf) - index.connect() - if not conf.skip_index: - LOG.info("Upgrading indexer %s", index) - index.upgrade() - if not conf.skip_storage: - s = storage.get_driver(conf) - LOG.info("Upgrading storage %s", s) - s.upgrade(index, conf.num_storage_sacks) - - if (not conf.skip_archive_policies_creation - and not index.list_archive_policies() - and not index.list_archive_policy_rules()): - for name, ap in six.iteritems(archive_policy.DEFAULT_ARCHIVE_POLICIES): - index.create_archive_policy(ap) - index.create_archive_policy_rule("default", "*", "low") - - -def change_sack_size(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.IntOpt("sack_size", required=True, min=1, - help="Number of sacks."), - ]) - conf = service.prepare_service(conf=conf) - s = storage.get_driver(conf) - report = s.incoming.measures_report(details=False) - remainder = report['summary']['measures'] - if remainder: - LOG.error('Cannot change sack when non-empty backlog. Process ' - 'remaining %s measures and try again', remainder) - return - LOG.info("Changing sack size to: %s", conf.sack_size) - old_num_sacks = s.incoming.get_storage_sacks() - s.incoming.set_storage_settings(conf.sack_size) - s.incoming.remove_sack_group(old_num_sacks) - - -def statsd(): - statsd_service.start() - - -class MetricProcessBase(cotyledon.Service): - def __init__(self, worker_id, conf, interval_delay=0): - super(MetricProcessBase, self).__init__(worker_id) - self.conf = conf - self.startup_delay = worker_id - self.interval_delay = interval_delay - self._shutdown = threading.Event() - self._shutdown_done = threading.Event() - - def _configure(self): - self.store = storage.get_driver(self.conf) - self.index = indexer.get_driver(self.conf) - self.index.connect() - - def run(self): - self._configure() - # Delay startup so workers are jittered. 
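The run() loop that follows implements fixed-interval scheduling with an interruptible wait; a standalone sketch of the same pattern, with illustrative names (IntervalWorker is not part of gnocchi):

    import threading
    import time

    class IntervalWorker(object):
        """Sketch of the MetricProcessBase loop: run a job, then wait only
        for the remainder of the interval so slow jobs do not add drift."""

        def __init__(self, interval_delay):
            self.interval_delay = interval_delay
            self._shutdown = threading.Event()

        def _run_job(self):
            time.sleep(0.1)  # stand-in for real work

        def run(self):
            while not self._shutdown.is_set():
                start = time.time()
                self._run_job()
                elapsed = time.time() - start
                # Event.wait() doubles as an interruptible sleep, so
                # terminate() wakes the worker immediately.
                self._shutdown.wait(max(0, self.interval_delay - elapsed))

        def terminate(self):
            self._shutdown.set()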
- time.sleep(self.startup_delay) - - while not self._shutdown.is_set(): - with utils.StopWatch() as timer: - self._run_job() - self._shutdown.wait(max(0, self.interval_delay - timer.elapsed())) - self._shutdown_done.set() - - def terminate(self): - self._shutdown.set() - self.close_services() - LOG.info("Waiting ongoing metric processing to finish") - self._shutdown_done.wait() - - @staticmethod - def close_services(): - pass - - @staticmethod - def _run_job(): - raise NotImplementedError - - -class MetricReporting(MetricProcessBase): - name = "reporting" - - def __init__(self, worker_id, conf): - super(MetricReporting, self).__init__( - worker_id, conf, conf.metricd.metric_reporting_delay) - - def _run_job(self): - try: - report = self.store.incoming.measures_report(details=False) - LOG.info("%d measurements bundles across %d " - "metrics wait to be processed.", - report['summary']['measures'], - report['summary']['metrics']) - except incoming.ReportGenerationError: - LOG.warning("Unable to compute backlog. Retrying at next " - "interval.") - except Exception: - LOG.error("Unexpected error during pending measures reporting", - exc_info=True) - - -class MetricProcessor(MetricProcessBase): - name = "processing" - GROUP_ID = "gnocchi-processing" - - def __init__(self, worker_id, conf): - super(MetricProcessor, self).__init__( - worker_id, conf, conf.metricd.metric_processing_delay) - self._coord, self._my_id = utils.get_coordinator_and_start( - conf.storage.coordination_url) - self._tasks = [] - self.group_state = None - - @utils.retry - def _configure(self): - super(MetricProcessor, self)._configure() - # create fallback in case paritioning fails or assigned no tasks - self.fallback_tasks = list( - six.moves.range(self.store.incoming.NUM_SACKS)) - try: - self.partitioner = self._coord.join_partitioned_group( - self.GROUP_ID, partitions=200) - LOG.info('Joined coordination group: %s', self.GROUP_ID) - - @periodics.periodic(spacing=self.conf.metricd.worker_sync_rate, - run_immediately=True) - def run_watchers(): - self._coord.run_watchers() - - self.periodic = periodics.PeriodicWorker.create([]) - self.periodic.add(run_watchers) - t = threading.Thread(target=self.periodic.start) - t.daemon = True - t.start() - except NotImplementedError: - LOG.warning('Coordinator does not support partitioning. Worker ' - 'will battle against other workers for jobs.') - except tooz.ToozError as e: - LOG.error('Unexpected error configuring coordinator for ' - 'partitioning. 
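The watcher thread set up in _configure() above relies on futurist's periodic tasks; a self-contained sketch of that pattern (the spacing value is illustrative):

    import threading

    from futurist import periodics

    @periodics.periodic(spacing=30, run_immediately=True)
    def run_watchers():
        pass  # stand-in for coordinator.run_watchers()

    # An empty worker to which standalone callbacks are added, exactly as
    # MetricProcessor does, then run on a daemon thread.
    worker = periodics.PeriodicWorker.create([])
    worker.add(run_watchers)
    t = threading.Thread(target=worker.start)
    t.daemon = True
    t.start()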
Retrying: %s', e) - raise tenacity.TryAgain(e) - - def _get_tasks(self): - try: - if (not self._tasks or - self.group_state != self.partitioner.ring.nodes): - self.group_state = self.partitioner.ring.nodes.copy() - self._tasks = [ - i for i in six.moves.range(self.store.incoming.NUM_SACKS) - if self.partitioner.belongs_to_self( - i, replicas=self.conf.metricd.processing_replicas)] - finally: - return self._tasks or self.fallback_tasks - - def _run_job(self): - m_count = 0 - s_count = 0 - in_store = self.store.incoming - for s in self._get_tasks(): - # TODO(gordc): support delay release lock so we don't - # process a sack right after another process - lock = in_store.get_sack_lock(self._coord, s) - if not lock.acquire(blocking=False): - continue - try: - metrics = in_store.list_metric_with_measures_to_process(s) - m_count += len(metrics) - self.store.process_background_tasks(self.index, metrics) - s_count += 1 - except Exception: - LOG.error("Unexpected error processing assigned job", - exc_info=True) - finally: - lock.release() - LOG.debug("%d metrics processed from %d sacks", m_count, s_count) - - def close_services(self): - self._coord.stop() - - -class MetricJanitor(MetricProcessBase): - name = "janitor" - - def __init__(self, worker_id, conf): - super(MetricJanitor, self).__init__( - worker_id, conf, conf.metricd.metric_cleanup_delay) - - def _run_job(self): - try: - self.store.expunge_metrics(self.index) - LOG.debug("Metrics marked for deletion removed from backend") - except Exception: - LOG.error("Unexpected error during metric cleanup", exc_info=True) - - -class MetricdServiceManager(cotyledon.ServiceManager): - def __init__(self, conf): - super(MetricdServiceManager, self).__init__() - oslo_config_glue.setup(self, conf) - - self.conf = conf - self.metric_processor_id = self.add( - MetricProcessor, args=(self.conf,), - workers=conf.metricd.workers) - if self.conf.metricd.metric_reporting_delay >= 0: - self.add(MetricReporting, args=(self.conf,)) - self.add(MetricJanitor, args=(self.conf,)) - - self.register_hooks(on_reload=self.on_reload) - - def on_reload(self): - # NOTE(sileht): We do not implement reload() in Workers so all workers - # will received SIGHUP and exit gracefully, then their will be - # restarted with the new number of workers. This is important because - # we use the number of worker to declare the capability in tooz and - # to select the block of metrics to proceed. - self.reconfigure(self.metric_processor_id, - workers=self.conf.metricd.workers) - - def run(self): - super(MetricdServiceManager, self).run() - self.queue.close() - - -def metricd_tester(conf): - # NOTE(sileht): This method is designed to be profiled, we - # want to avoid issues with profiler and os.fork(), that - # why we don't use the MetricdServiceManager. 
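A sketch of the skip-if-busy locking in _run_job() above: each sack lock is tried without blocking, so a worker simply moves on when a peer already owns the sack. threading.Lock stands in for the tooz distributed lock, and the sack count is made up.

    import threading

    locks = dict((sack, threading.Lock()) for sack in range(8))

    def process(sack):
        pass  # stand-in for processing the sack's pending measures

    for sack in range(8):
        lock = locks[sack]
        if not lock.acquire(False):  # non-blocking: never wait on a peer
            continue
        try:
            process(sack)
        finally:
            lock.release()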
- index = indexer.get_driver(conf) - index.connect() - s = storage.get_driver(conf) - metrics = set() - for i in six.moves.range(s.incoming.NUM_SACKS): - metrics.update(s.incoming.list_metric_with_measures_to_process(i)) - if len(metrics) >= conf.stop_after_processing_metrics: - break - s.process_new_measures( - index, list(metrics)[:conf.stop_after_processing_metrics], True) - - -def metricd(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.IntOpt("stop-after-processing-metrics", - default=0, - min=0, - help="Number of metrics to process without workers, " - "for testing purpose"), - ]) - conf = service.prepare_service(conf=conf) - - if conf.stop_after_processing_metrics: - metricd_tester(conf) - else: - MetricdServiceManager(conf).run() diff --git a/gnocchi/exceptions.py b/gnocchi/exceptions.py deleted file mode 100644 index 81b484bf3..000000000 --- a/gnocchi/exceptions.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NotImplementedError(NotImplementedError): - pass diff --git a/gnocchi/genconfig.py b/gnocchi/genconfig.py deleted file mode 100644 index 0eba7359f..000000000 --- a/gnocchi/genconfig.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016-2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - - -def prehook(cmd, args=None): - if args is None: - args = ['--output-file', 'etc/gnocchi/gnocchi.conf'] - try: - from oslo_config import generator - generator.main( - ['--config-file', - '%s/gnocchi-config-generator.conf' % os.path.dirname(__file__)] - + args) - except Exception as e: - print("Unable to build sample configuration file: %s" % e) diff --git a/gnocchi/gendoc.py b/gnocchi/gendoc.py deleted file mode 100644 index 7b9a8a117..000000000 --- a/gnocchi/gendoc.py +++ /dev/null @@ -1,178 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
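genconfig.prehook() above drives oslo.config's sample generator programmatically; a minimal sketch, with an illustrative namespace and output path:

    from oslo_config import generator

    # Equivalent to running oslo-config-generator on the command line:
    # list the option namespaces to scan and where to write the sample.
    generator.main([
        '--namespace', 'oslo.log',
        '--output-file', '/tmp/sample.conf',
    ])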
-from __future__ import absolute_import -import json -import os -import subprocess -import sys -import tempfile - -import jinja2 -import six -import six.moves -import webob.request -import yaml - -from gnocchi.tests import test_rest - -# HACK(jd) Not sure why but Sphinx setup this multiple times, so we just avoid -# doing several times the requests by using this global variable :( -_RUN = False - - -def _setup_test_app(): - t = test_rest.RestTest() - t.auth_mode = "basic" - t.setUpClass() - t.setUp() - return t.app - - -def _format_json(txt): - return json.dumps(json.loads(txt), - sort_keys=True, - indent=2) - - -def _extract_body(req_or_resp): - # TODO(jd) Make this a Sphinx option - if req_or_resp.content_type == "application/json": - body = _format_json(req_or_resp.body) - else: - body = req_or_resp.body - return "\n ".join(body.split("\n")) - - -def _format_headers(headers): - return "\n".join( - " %s: %s" % (k, v) - for k, v in six.iteritems(headers)) - - -def _response_to_httpdomain(response): - return """ - .. sourcecode:: http - - HTTP/1.1 %(status)s -%(headers)s - - %(body)s""" % { - 'status': response.status, - 'body': _extract_body(response), - 'headers': _format_headers(response.headers), - } - - -def _request_to_httpdomain(request): - return """ - .. sourcecode:: http - - %(method)s %(path)s %(http_version)s -%(headers)s - - %(body)s""" % { - 'body': _extract_body(request), - 'method': request.method, - 'path': request.path_qs, - 'http_version': request.http_version, - 'headers': _format_headers(request.headers), - } - - -def _format_request_reply(request, response): - return (_request_to_httpdomain(request) - + "\n" - + _response_to_httpdomain(response)) - - -class ScenarioList(list): - def __getitem__(self, key): - for scenario in self: - if scenario['name'] == key: - return scenario - return super(ScenarioList, self).__getitem__(key) - - -multiversion_hack = """ -import sys -import os - -srcdir = os.path.join("%s", "..", "..") -os.chdir(srcdir) -sys.path.insert(0, srcdir) - -class FakeApp(object): - def info(self, *args, **kwasrgs): - pass - -import gnocchi.gendoc -gnocchi.gendoc.setup(FakeApp()) -""" - - -def setup(app): - global _RUN - if _RUN: - return - - # NOTE(sileht): On gnocchi.xyz, we build a multiversion of the docs - # all versions are built with the master gnocchi.gendoc sphinx extension. - # So the hack here run an other python script to generate the rest.rst - # file of old version of the module. - # It also drop the database before each run. 
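The ScenarioList defined below overloads indexing so documentation scenarios can reference each other by name while remaining an ordinary list; a tiny usage sketch with made-up data:

    class ScenarioList(list):
        def __getitem__(self, key):
            # Name-based lookup first, then normal integer indexing.
            for scenario in self:
                if scenario['name'] == key:
                    return scenario
            return super(ScenarioList, self).__getitem__(key)

    scenarios = ScenarioList([{'name': 'create-metric', 'request': '...'}])
    assert scenarios['create-metric'] is scenarios[0]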
- if sys.argv[0].endswith("sphinx-versioning"): - subprocess.call(["dropdb", os.environ['PGDATABASE']]) - subprocess.call(["createdb", os.environ['PGDATABASE']]) - - with tempfile.NamedTemporaryFile() as f: - f.write(multiversion_hack % app.confdir) - f.flush() - subprocess.call(['python', f.name]) - _RUN = True - return - - webapp = _setup_test_app() - # TODO(jd) Do not hardcode doc/source - with open("doc/source/rest.yaml") as f: - scenarios = ScenarioList(yaml.load(f)) - for entry in scenarios: - template = jinja2.Template(entry['request']) - fake_file = six.moves.cStringIO() - fake_file.write(template.render(scenarios=scenarios).encode('utf-8')) - fake_file.seek(0) - request = webapp.RequestClass.from_file(fake_file) - - # TODO(jd) Fix this lame bug in webob < 1.7 - if (hasattr(webob.request, "http_method_probably_has_body") - and request.method == "DELETE"): - # Webob has a bug it does not read the body for DELETE, l4m3r - clen = request.content_length - if clen is None: - request.body = fake_file.read() - else: - request.body = fake_file.read(clen) - - app.info("Doing request %s: %s" % (entry['name'], - six.text_type(request))) - with webapp.use_admin_user(): - response = webapp.request(request) - entry['response'] = response - entry['doc'] = _format_request_reply(request, response) - with open("doc/source/rest.j2", "r") as f: - template = jinja2.Template(f.read().decode('utf-8')) - with open("doc/source/rest.rst", "w") as f: - f.write(template.render(scenarios=scenarios).encode('utf-8')) - _RUN = True diff --git a/gnocchi/gnocchi-config-generator.conf b/gnocchi/gnocchi-config-generator.conf deleted file mode 100644 index df6e98806..000000000 --- a/gnocchi/gnocchi-config-generator.conf +++ /dev/null @@ -1,11 +0,0 @@ -[DEFAULT] -wrap_width = 79 -namespace = gnocchi -namespace = oslo.db -namespace = oslo.log -namespace = oslo.middleware.cors -namespace = oslo.middleware.healthcheck -namespace = oslo.middleware.http_proxy_to_wsgi -namespace = oslo.policy -namespace = cotyledon -namespace = keystonemiddleware.auth_token diff --git a/gnocchi/indexer/__init__.py b/gnocchi/indexer/__init__.py deleted file mode 100644 index 1ffc9cb4f..000000000 --- a/gnocchi/indexer/__init__.py +++ /dev/null @@ -1,411 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
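The two-pass templating used by setup() above lets a request in rest.yaml reference the response of an earlier scenario; a sketch with fabricated scenario data (a plain dict stands in for ScenarioList):

    import jinja2

    scenarios = {'create-metric': {'response': {'id': '42'}}}
    template = jinja2.Template(
        "GET /v1/metric/{{ scenarios['create-metric']['response']['id'] }}")
    print(template.render(scenarios=scenarios))  # GET /v1/metric/42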
-import fnmatch -import hashlib -import os - -import iso8601 -from oslo_config import cfg -import six -from six.moves.urllib import parse -from stevedore import driver - -from gnocchi import exceptions - -OPTS = [ - cfg.StrOpt('url', - secret=True, - required=True, - default=os.getenv("GNOCCHI_INDEXER_URL"), - help='Indexer driver to use'), -] - - -_marker = object() - - -class Resource(object): - def get_metric(self, metric_name): - for m in self.metrics: - if m.name == metric_name: - return m - - def __eq__(self, other): - return (self.id == other.id - and self.type == other.type - and self.revision == other.revision - and self.revision_start == other.revision_start - and self.revision_end == other.revision_end - and self.creator == other.creator - and self.user_id == other.user_id - and self.project_id == other.project_id - and self.started_at == other.started_at - and self.ended_at == other.ended_at) - - @property - def etag(self): - etag = hashlib.sha1() - etag.update(six.text_type(self.id).encode('utf-8')) - etag.update(six.text_type( - self.revision_start.isoformat()).encode('utf-8')) - return etag.hexdigest() - - @property - def lastmodified(self): - # less precise revision start for Last-Modified http header - return self.revision_start.replace(microsecond=0, - tzinfo=iso8601.iso8601.UTC) - - -def get_driver(conf): - """Return the configured driver.""" - split = parse.urlsplit(conf.indexer.url) - d = driver.DriverManager('gnocchi.indexer', - split.scheme).driver - return d(conf) - - -class IndexerException(Exception): - """Base class for all exceptions raised by an indexer.""" - - -class NoSuchResourceType(IndexerException): - """Error raised when the resource type is unknown.""" - def __init__(self, type): - super(NoSuchResourceType, self).__init__( - "Resource type %s does not exist" % type) - self.type = type - - -class NoSuchMetric(IndexerException): - """Error raised when a metric does not exist.""" - def __init__(self, metric): - super(NoSuchMetric, self).__init__("Metric %s does not exist" % - metric) - self.metric = metric - - -class NoSuchResource(IndexerException): - """Error raised when a resource does not exist.""" - def __init__(self, resource): - super(NoSuchResource, self).__init__("Resource %s does not exist" % - resource) - self.resource = resource - - -class NoSuchArchivePolicy(IndexerException): - """Error raised when an archive policy does not exist.""" - def __init__(self, archive_policy): - super(NoSuchArchivePolicy, self).__init__( - "Archive policy %s does not exist" % archive_policy) - self.archive_policy = archive_policy - - -class UnsupportedArchivePolicyChange(IndexerException): - """Error raised when modifying archive policy if not supported.""" - def __init__(self, archive_policy, message): - super(UnsupportedArchivePolicyChange, self).__init__( - "Archive policy %s does not support change: %s" % - (archive_policy, message)) - self.archive_policy = archive_policy - self.message = message - - -class ArchivePolicyInUse(IndexerException): - """Error raised when an archive policy is still being used.""" - def __init__(self, archive_policy): - super(ArchivePolicyInUse, self).__init__( - "Archive policy %s is still in use" % archive_policy) - self.archive_policy = archive_policy - - -class ResourceTypeInUse(IndexerException): - """Error raised when an resource type is still being used.""" - def __init__(self, resource_type): - super(ResourceTypeInUse, self).__init__( - "Resource type %s is still in use" % resource_type) - self.resource_type = resource_type 
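get_driver() above resolves the indexer backend from the URL scheme through stevedore entry points; a sketch assuming a gnocchi installation that registers the 'gnocchi.indexer' entry-point namespace:

    from six.moves.urllib import parse
    from stevedore import driver

    url = "postgresql://localhost/gnocchi"  # illustrative indexer URL
    scheme = parse.urlsplit(url).scheme
    # DriverManager loads the entry point named after the scheme; .driver
    # is the plugin class, which get_driver() instantiates with the conf.
    indexer_cls = driver.DriverManager('gnocchi.indexer', scheme).driver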
- - -class UnexpectedResourceTypeState(IndexerException): - """Error raised when an resource type state is not expected.""" - def __init__(self, resource_type, expected_state, state): - super(UnexpectedResourceTypeState, self).__init__( - "Resource type %s state is %s (expected: %s)" % ( - resource_type, state, expected_state)) - self.resource_type = resource_type - self.expected_state = expected_state - self.state = state - - -class NoSuchArchivePolicyRule(IndexerException): - """Error raised when an archive policy rule does not exist.""" - def __init__(self, archive_policy_rule): - super(NoSuchArchivePolicyRule, self).__init__( - "Archive policy rule %s does not exist" % - archive_policy_rule) - self.archive_policy_rule = archive_policy_rule - - -class NoArchivePolicyRuleMatch(IndexerException): - """Error raised when no archive policy rule found for metric.""" - def __init__(self, metric_name): - super(NoArchivePolicyRuleMatch, self).__init__( - "No Archive policy rule found for metric %s" % - metric_name) - self.metric_name = metric_name - - -class NamedMetricAlreadyExists(IndexerException): - """Error raised when a named metric already exists.""" - def __init__(self, metric): - super(NamedMetricAlreadyExists, self).__init__( - "Named metric %s already exists" % metric) - self.metric = metric - - -class ResourceAlreadyExists(IndexerException): - """Error raised when a resource already exists.""" - def __init__(self, resource): - super(ResourceAlreadyExists, self).__init__( - "Resource %s already exists" % resource) - self.resource = resource - - -class ResourceTypeAlreadyExists(IndexerException): - """Error raised when a resource type already exists.""" - def __init__(self, resource_type): - super(ResourceTypeAlreadyExists, self).__init__( - "Resource type %s already exists" % resource_type) - self.resource_type = resource_type - - -class ResourceAttributeError(IndexerException, AttributeError): - """Error raised when an attribute does not exist for a resource type.""" - def __init__(self, resource, attribute): - super(ResourceAttributeError, self).__init__( - "Resource type %s has no %s attribute" % (resource, attribute)) - self.resource = resource - self.attribute = attribute - - -class ResourceValueError(IndexerException, ValueError): - """Error raised when an attribute value is invalid for a resource type.""" - def __init__(self, resource_type, attribute, value): - super(ResourceValueError, self).__init__( - "Value %s for attribute %s on resource type %s is invalid" - % (value, attribute, resource_type)) - self.resource_type = resource_type - self.attribute = attribute - self.value = value - - -class ArchivePolicyAlreadyExists(IndexerException): - """Error raised when an archive policy already exists.""" - def __init__(self, name): - super(ArchivePolicyAlreadyExists, self).__init__( - "Archive policy %s already exists" % name) - self.name = name - - -class ArchivePolicyRuleAlreadyExists(IndexerException): - """Error raised when an archive policy rule already exists.""" - def __init__(self, name): - super(ArchivePolicyRuleAlreadyExists, self).__init__( - "Archive policy rule %s already exists" % name) - self.name = name - - -class QueryError(IndexerException): - def __init__(self): - super(QueryError, self).__init__("Unable to parse this query") - - -class QueryValueError(QueryError, ValueError): - def __init__(self, v, f): - super(QueryError, self).__init__("Invalid value: `%s' for field `%s'" - % (v, f)) - - -class QueryInvalidOperator(QueryError): - def __init__(self, op): - 
self.op = op - super(QueryError, self).__init__("Unknown operator `%s'" % op) - - -class QueryAttributeError(QueryError, ResourceAttributeError): - def __init__(self, resource, attribute): - ResourceAttributeError.__init__(self, resource, attribute) - - -class InvalidPagination(IndexerException): - """Error raised when a resource does not exist.""" - def __init__(self, reason): - self.reason = reason - super(InvalidPagination, self).__init__( - "Invalid pagination: `%s'" % reason) - - -class IndexerDriver(object): - @staticmethod - def __init__(conf): - pass - - @staticmethod - def connect(): - pass - - @staticmethod - def disconnect(): - pass - - @staticmethod - def upgrade(nocreate=False): - pass - - @staticmethod - def get_resource(resource_type, resource_id, with_metrics=False): - """Get a resource from the indexer. - - :param resource_type: The type of the resource to look for. - :param resource_id: The UUID of the resource. - :param with_metrics: Whether to include metrics information. - """ - raise exceptions.NotImplementedError - - @staticmethod - def list_resources(resource_type='generic', - attribute_filter=None, - details=False, - history=False, - limit=None, - marker=None, - sorts=None): - raise exceptions.NotImplementedError - - @staticmethod - def list_archive_policies(): - raise exceptions.NotImplementedError - - @staticmethod - def get_archive_policy(name): - raise exceptions.NotImplementedError - - @staticmethod - def update_archive_policy(name, ap_items): - raise exceptions.NotImplementedError - - @staticmethod - def delete_archive_policy(name): - raise exceptions.NotImplementedError - - @staticmethod - def get_archive_policy_rule(name): - raise exceptions.NotImplementedError - - @staticmethod - def list_archive_policy_rules(): - raise exceptions.NotImplementedError - - @staticmethod - def create_archive_policy_rule(name, metric_pattern, archive_policy_name): - raise exceptions.NotImplementedError - - @staticmethod - def delete_archive_policy_rule(name): - raise exceptions.NotImplementedError - - @staticmethod - def create_metric(id, creator, - archive_policy_name, name=None, unit=None, - resource_id=None): - raise exceptions.NotImplementedError - - @staticmethod - def list_metrics(names=None, ids=None, details=False, status='active', - limit=None, marker=None, sorts=None, **kwargs): - raise exceptions.NotImplementedError - - @staticmethod - def create_archive_policy(archive_policy): - raise exceptions.NotImplementedError - - @staticmethod - def create_resource(resource_type, id, creator, - user_id=None, project_id=None, - started_at=None, ended_at=None, metrics=None, - **kwargs): - raise exceptions.NotImplementedError - - @staticmethod - def update_resource(resource_type, resource_id, ended_at=_marker, - metrics=_marker, - append_metrics=False, - create_revision=True, - **kwargs): - raise exceptions.NotImplementedError - - @staticmethod - def delete_resource(uuid): - raise exceptions.NotImplementedError - - @staticmethod - def delete_resources(resource_type='generic', - attribute_filter=None): - raise exceptions.NotImplementedError - - @staticmethod - def delete_metric(id): - raise exceptions.NotImplementedError - - @staticmethod - def expunge_metric(id): - raise exceptions.NotImplementedError - - def get_archive_policy_for_metric(self, metric_name): - """Helper to get the archive policy according archive policy rules.""" - rules = self.list_archive_policy_rules() - for rule in rules: - if fnmatch.fnmatch(metric_name or "", rule.metric_pattern): - return 
self.get_archive_policy(rule.archive_policy_name) - raise NoArchivePolicyRuleMatch(metric_name) - - @staticmethod - def create_resource_type(resource_type): - raise exceptions.NotImplementedError - - @staticmethod - def get_resource_type(name): - """Get a resource type from the indexer. - - :param name: name of the resource type - """ - raise exceptions.NotImplementedError - - @staticmethod - def list_resource_types(attribute_filter=None, - limit=None, - marker=None, - sorts=None): - raise exceptions.NotImplementedError - - @staticmethod - def get_resource_attributes_schemas(): - raise exceptions.NotImplementedError - - @staticmethod - def get_resource_type_schema(): - raise exceptions.NotImplementedError diff --git a/gnocchi/indexer/alembic/alembic.ini b/gnocchi/indexer/alembic/alembic.ini deleted file mode 100644 index db7340acd..000000000 --- a/gnocchi/indexer/alembic/alembic.ini +++ /dev/null @@ -1,3 +0,0 @@ -[alembic] -script_location = gnocchi.indexer:alembic -sqlalchemy.url = postgresql://localhost/gnocchi diff --git a/gnocchi/indexer/alembic/env.py b/gnocchi/indexer/alembic/env.py deleted file mode 100644 index 47f58efbf..000000000 --- a/gnocchi/indexer/alembic/env.py +++ /dev/null @@ -1,90 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Gnocchi API with gabbi.""" - -from alembic import context - -from gnocchi.indexer import sqlalchemy -from gnocchi.indexer import sqlalchemy_base - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = sqlalchemy_base.Base.metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - conf = config.conf - context.configure(url=conf.indexer.url, - target_metadata=target_metadata) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
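get_archive_policy_for_metric() above is a first-match glob lookup; a self-contained sketch with made-up rules (real rules come from the indexer, and their ordering is significant):

    import fnmatch

    rules = [("disk.io.*", "low"), ("cpu*", "medium"), ("*", "high")]

    def archive_policy_for(metric_name):
        for pattern, policy in rules:
            # fnmatch gives shell-style globbing: *, ?, [seq].
            if fnmatch.fnmatch(metric_name or "", pattern):
                return policy
        raise LookupError("no archive policy rule matches %r" % metric_name)

    assert archive_policy_for("cpu_util") == "medium"
    assert archive_policy_for("memory.usage") == "high"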
- - """ - conf = config.conf - indexer = sqlalchemy.SQLAlchemyIndexer(conf) - indexer.connect() - with indexer.facade.writer_connection() as connectable: - - with connectable.connect() as connection: - context.configure( - connection=connection, - target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - indexer.disconnect() - -# If `alembic' was used directly from the CLI -if not hasattr(config, "conf"): - from gnocchi import service - config.conf = service.prepare_service([]) - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/gnocchi/indexer/alembic/script.py.mako b/gnocchi/indexer/alembic/script.py.mako deleted file mode 100644 index 8f4e92ea7..000000000 --- a/gnocchi/indexer/alembic/script.py.mako +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright ${create_date.year} OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" - -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} diff --git a/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py b/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py deleted file mode 100644 index 8662b1146..000000000 --- a/gnocchi/indexer/alembic/versions/0718ed97e5b3_add_tablename_to_resource_type.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Add tablename to resource_type - -Revision ID: 0718ed97e5b3 -Revises: 828c16f70cce -Create Date: 2016-01-20 08:14:04.893783 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
-revision = '0718ed97e5b3' -down_revision = '828c16f70cce' -branch_labels = None -depends_on = None - - -def upgrade(): - op.add_column("resource_type", sa.Column('tablename', sa.String(18), - nullable=True)) - - resource_type = sa.Table( - 'resource_type', sa.MetaData(), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('tablename', sa.String(18), nullable=True) - ) - op.execute(resource_type.update().where( - resource_type.c.name == "instance_network_interface" - ).values({'tablename': op.inline_literal("'instance_net_int'")})) - op.execute(resource_type.update().where( - resource_type.c.name != "instance_network_interface" - ).values({'tablename': resource_type.c.name})) - - op.alter_column("resource_type", "tablename", type_=sa.String(18), - nullable=False) - op.create_unique_constraint("uniq_resource_type0tablename", - "resource_type", ["tablename"]) diff --git a/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py b/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py deleted file mode 100644 index 59632635f..000000000 --- a/gnocchi/indexer/alembic/versions/1c2c61ac1f4c_add_original_resource_id_column.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""add original resource id column - -Revision ID: 1c2c61ac1f4c -Revises: 1f21cbdd6bc2 -Create Date: 2016-01-27 05:57:48.909012 - -""" - -from alembic import op -import sqlalchemy as sa - -# revision identifiers, used by Alembic. -revision = '1c2c61ac1f4c' -down_revision = '62a8dfb139bb' -branch_labels = None -depends_on = None - - -def upgrade(): - op.add_column('resource', sa.Column('original_resource_id', - sa.String(length=255), - nullable=True)) - op.add_column('resource_history', sa.Column('original_resource_id', - sa.String(length=255), - nullable=True)) diff --git a/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py b/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py deleted file mode 100644 index ff04411f9..000000000 --- a/gnocchi/indexer/alembic/versions/1c98ac614015_initial_base.py +++ /dev/null @@ -1,267 +0,0 @@ -# flake8: noqa -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Initial base for Gnocchi 1.0.0 - -Revision ID: 1c98ac614015 -Revises: -Create Date: 2015-04-27 16:05:13.530625 - -""" - -# revision identifiers, used by Alembic. 
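The tablename migration above follows the classic three-step data migration: add the column NULLable, backfill, then tighten the constraint. The same shape on a toy table (all names here are illustrative):

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # 1. Add the column as NULLable so existing rows remain valid.
        op.add_column("widget", sa.Column("slug", sa.String(18),
                                          nullable=True))
        widget = sa.table("widget",
                          sa.column("name", sa.String(255)),
                          sa.column("slug", sa.String(18)))
        # 2. Backfill every row with a server-side UPDATE.
        op.execute(widget.update().values({"slug": widget.c.name}))
        # 3. Only then enforce NOT NULL and uniqueness.
        op.alter_column("widget", "slug", type_=sa.String(18),
                        nullable=False)
        op.create_unique_constraint("uniq_widget0slug", "widget", ["slug"])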
-revision = '1c98ac614015' -down_revision = None -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -import sqlalchemy_utils - -import gnocchi.indexer.sqlalchemy_base - - -def upgrade(): - op.create_table('resource', - sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False), - sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True), - sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_resource_id', 'resource', ['id'], unique=False) - op.create_table('archive_policy', - sa.Column('name', sa.String(length=255), nullable=False), - sa.Column('back_window', sa.Integer(), nullable=False), - sa.Column('definition', gnocchi.indexer.sqlalchemy_base.ArchivePolicyDefinitionType(), nullable=False), - sa.Column('aggregation_methods', gnocchi.indexer.sqlalchemy_base.SetType(), nullable=False), - sa.PrimaryKeyConstraint('name'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_archive_policy_name', 'archive_policy', ['name'], unique=False) - op.create_table('volume', - sa.Column('display_name', sa.String(length=255), nullable=False), - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_volume_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_volume_id', 'volume', ['id'], unique=False) - op.create_table('instance', - sa.Column('flavor_id', sa.Integer(), nullable=False), - sa.Column('image_ref', sa.String(length=255), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('display_name', sa.String(length=255), nullable=False), - sa.Column('server_group', sa.String(length=255), nullable=True), - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_instance_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_instance_id', 'instance', ['id'], unique=False) - op.create_table('stack', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_stack_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_stack_id', 'stack', ['id'], unique=False) - op.create_table('archive_policy_rule', - sa.Column('name', sa.String(length=255), nullable=False), - sa.Column('archive_policy_name', sa.String(length=255), 
nullable=False), - sa.Column('metric_pattern', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_archive_policy_rule_archive_policy_name_archive_policy_name", ondelete='RESTRICT'), - sa.PrimaryKeyConstraint('name'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_archive_policy_rule_name', 'archive_policy_rule', ['name'], unique=False) - op.create_table('swift_account', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_swift_account_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_swift_account_id', 'swift_account', ['id'], unique=False) - op.create_table('ceph_account', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ceph_account_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_ceph_account_id', 'ceph_account', ['id'], unique=False) - op.create_table('ipmi', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_ipmi_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_ipmi_id', 'ipmi', ['id'], unique=False) - op.create_table('image', - sa.Column('name', sa.String(length=255), nullable=False), - sa.Column('container_format', sa.String(length=255), nullable=False), - sa.Column('disk_format', sa.String(length=255), nullable=False), - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_image_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_image_id', 'image', ['id'], unique=False) - op.create_table('resource_history', - sa.Column('type', sa.Enum('generic', 'instance', 'swift_account', 'volume', 'ceph_account', 'network', 'identity', 'ipmi', 'stack', 'image', name='resource_type_enum'), nullable=False), - sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('started_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('revision_start', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.Column('ended_at', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=True), - sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('revision', sa.Integer(), nullable=False), - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.Column('revision_end', gnocchi.indexer.sqlalchemy_base.PreciseTimestamp(), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_resource_history_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_resource_history_id', 
'resource_history', ['id'], unique=False) - op.create_table('identity', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_identity_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_identity_id', 'identity', ['id'], unique=False) - op.create_table('network', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], name="fk_network_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_network_id', 'network', ['id'], unique=False) - op.create_table('metric', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), - sa.Column('archive_policy_name', sa.String(length=255), nullable=False), - sa.Column('created_by_user_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('created_by_project_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=True), - sa.Column('name', sa.String(length=255), nullable=True), - sa.ForeignKeyConstraint(['archive_policy_name'], ['archive_policy.name'], name="fk_metric_archive_policy_name_archive_policy_name", ondelete='RESTRICT'), - sa.ForeignKeyConstraint(['resource_id'], ['resource.id'], name="fk_metric_resource_id_resource_id", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('resource_id', 'name', name='uniq_metric0resource_id0name'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_metric_id', 'metric', ['id'], unique=False) - op.create_table('identity_history', - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_identity_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_identity_history_revision', 'identity_history', ['revision'], unique=False) - op.create_table('instance_history', - sa.Column('flavor_id', sa.Integer(), nullable=False), - sa.Column('image_ref', sa.String(length=255), nullable=False), - sa.Column('host', sa.String(length=255), nullable=False), - sa.Column('display_name', sa.String(length=255), nullable=False), - sa.Column('server_group', sa.String(length=255), nullable=True), - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_instance_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_instance_history_revision', 'instance_history', ['revision'], unique=False) - op.create_table('network_history', - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_network_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_network_history_revision', 'network_history', ['revision'], unique=False) - op.create_table('swift_account_history', - sa.Column('revision', sa.Integer(), nullable=False), 
- sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_swift_account_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_swift_account_history_revision', 'swift_account_history', ['revision'], unique=False) - op.create_table('ceph_account_history', - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ceph_account_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_ceph_account_history_revision', 'ceph_account_history', ['revision'], unique=False) - op.create_table('ipmi_history', - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_ipmi_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_ipmi_history_revision', 'ipmi_history', ['revision'], unique=False) - op.create_table('image_history', - sa.Column('name', sa.String(length=255), nullable=False), - sa.Column('container_format', sa.String(length=255), nullable=False), - sa.Column('disk_format', sa.String(length=255), nullable=False), - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_image_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_image_history_revision', 'image_history', ['revision'], unique=False) - op.create_table('stack_history', - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_stack_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_stack_history_revision', 'stack_history', ['revision'], unique=False) - op.create_table('volume_history', - sa.Column('display_name', sa.String(length=255), nullable=False), - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], ['resource_history.revision'], name="fk_volume_history_resource_history_revision", ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - op.create_index('ix_volume_history_revision', 'volume_history', ['revision'], unique=False) diff --git a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py b/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py deleted file mode 100644 index bd73b12b0..000000000 --- a/gnocchi/indexer/alembic/versions/1e1a63d3d186_original_resource_id_not_null.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Make sure resource.original_resource_id is NOT NULL - -Revision ID: 1e1a63d3d186 -Revises: 397987e38570 -Create Date: 2017-01-26 19:33:35.209688 - -""" - -from alembic import op -import sqlalchemy as sa -from sqlalchemy import func -import sqlalchemy_utils - - -# revision identifiers, used by Alembic. -revision = '1e1a63d3d186' -down_revision = '397987e38570' -branch_labels = None -depends_on = None - - -def clean_substr(col, start, length): - return func.lower(func.substr(func.hex(col), start, length)) - - -def upgrade(): - bind = op.get_bind() - for table_name in ('resource', 'resource_history'): - table = sa.Table(table_name, sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - sa.Column('original_resource_id', sa.String(255))) - - # NOTE(gordc): mysql stores id as binary so we need to rebuild back to - # string uuid. - if bind and bind.engine.name == "mysql": - vals = {'original_resource_id': - clean_substr(table.c.id, 1, 8) + '-' + - clean_substr(table.c.id, 9, 4) + '-' + - clean_substr(table.c.id, 13, 4) + '-' + - clean_substr(table.c.id, 17, 4) + '-' + - clean_substr(table.c.id, 21, 12)} - else: - vals = {'original_resource_id': table.c.id} - - op.execute(table.update().where( - table.c.original_resource_id.is_(None)).values(vals)) - op.alter_column(table_name, "original_resource_id", nullable=False, - existing_type=sa.String(255), - existing_nullable=True) diff --git a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py b/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py deleted file mode 100644 index e2e48d9b4..000000000 --- a/gnocchi/indexer/alembic/versions/1f21cbdd6bc2_allow_volume_display_name_to_be_null.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""allow volume display name to be null - -Revision ID: 1f21cbdd6bc2 -Revises: 469b308577a9 -Create Date: 2015-12-08 02:12:20.273880 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
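clean_substr() above rebuilds the dashed UUID text form from HEX(id) because MySQL stores the id as 16 raw bytes; the pure-Python equivalent of those five substr slices (1-indexed in SQL):

    import binascii
    import uuid

    raw = uuid.uuid4().bytes             # the BINARY(16) column value
    h = binascii.hexlify(raw).decode()   # HEX(id), lower-cased
    rebuilt = "-".join([h[0:8], h[8:12], h[12:16], h[16:20], h[20:32]])
    assert rebuilt == str(uuid.UUID(bytes=raw))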
-revision = '1f21cbdd6bc2' -down_revision = '469b308577a9' -branch_labels = None -depends_on = None - - -def upgrade(): - op.alter_column('volume', 'display_name', - existing_type=sa.String(length=255), - nullable=True) - op.alter_column('volume_history', 'display_name', - existing_type=sa.String(length=255), - nullable=True) diff --git a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py b/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py deleted file mode 100644 index 21dc7e421..000000000 --- a/gnocchi/indexer/alembic/versions/27d2a1d205ff_add_updating_resource_type_states.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Add updating resource type states - -Revision ID: 27d2a1d205ff -Revises: 7e6f9d542f8b -Create Date: 2016-08-31 14:05:34.316496 - -""" - -from alembic import op -import sqlalchemy as sa - -from gnocchi.indexer import sqlalchemy_base -from gnocchi import utils - -# revision identifiers, used by Alembic. -revision = '27d2a1d205ff' -down_revision = '7e6f9d542f8b' -branch_labels = None -depends_on = None - - -resource_type = sa.sql.table( - 'resource_type', - sa.sql.column('updated_at', sqlalchemy_base.PreciseTimestamp())) - -state_enum = sa.Enum("active", "creating", - "creation_error", "deleting", - "deletion_error", "updating", - "updating_error", - name="resource_type_state_enum") - - -def upgrade(): - - op.alter_column('resource_type', 'state', - type_=state_enum, - nullable=False, - server_default=None) - - # NOTE(sileht): postgresql have a builtin ENUM type, so - # just altering the column won't works. - # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type - # Does it break offline migration because we use get_bind() ? - - # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction, - # we split the 'ALTER TYPE' operation into several steps. - bind = op.get_bind() - if bind and bind.engine.name == "postgresql": - op.execute("ALTER TYPE resource_type_state_enum RENAME TO \ - old_resource_type_state_enum") - op.execute("CREATE TYPE resource_type_state_enum AS ENUM \ - ('active', 'creating', 'creation_error', \ - 'deleting', 'deletion_error', 'updating', \ - 'updating_error')") - op.execute("ALTER TABLE resource_type ALTER COLUMN state TYPE \ - resource_type_state_enum USING \ - state::text::resource_type_state_enum") - op.execute("DROP TYPE old_resource_type_state_enum") - - # NOTE(sileht): we can't alter type with server_default set on - # postgresql... 
- op.alter_column('resource_type', 'state', - type_=state_enum, - nullable=False, - server_default="creating") - op.add_column("resource_type", - sa.Column("updated_at", - sqlalchemy_base.PreciseTimestamp(), - nullable=True)) - - op.execute(resource_type.update().values({'updated_at': utils.utcnow()})) - op.alter_column("resource_type", "updated_at", - type_=sqlalchemy_base.PreciseTimestamp(), - nullable=False) diff --git a/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py b/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py deleted file mode 100644 index 5215da094..000000000 --- a/gnocchi/indexer/alembic/versions/2e0b912062d1_drop_useless_enum.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""drop_useless_enum - -Revision ID: 2e0b912062d1 -Revises: 34c517bcc2dd -Create Date: 2016-04-15 07:29:38.492237 - -""" - -from alembic import op - - -# revision identifiers, used by Alembic. -revision = '2e0b912062d1' -down_revision = '34c517bcc2dd' -branch_labels = None -depends_on = None - - -def upgrade(): - bind = op.get_bind() - if bind and bind.engine.name == "postgresql": - # NOTE(sileht): we use IF exists because if the database have - # been created from scratch with 2.1 the enum doesn't exists - op.execute("DROP TYPE IF EXISTS resource_type_enum") diff --git a/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py b/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py deleted file mode 100644 index f7a4a61a4..000000000 --- a/gnocchi/indexer/alembic/versions/34c517bcc2dd_shorter_foreign_key.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""shorter_foreign_key - -Revision ID: 34c517bcc2dd -Revises: ed9c6ddc5c35 -Create Date: 2016-04-13 16:58:42.536431 - -""" - -from alembic import op -import sqlalchemy - -# revision identifiers, used by Alembic. 
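Several migrations in this series repeat the same PostgreSQL workaround noted above: a native ENUM column cannot simply be altered, so the old type is renamed, the replacement type is created, the column is cast through text, and the old type is dropped. A generic sketch of that four-step dance, with hypothetical type, table and column names:

    from alembic import op

    def upgrade():
        # Rebuild a native PostgreSQL ENUM in place; every name here is
        # made up ("thing" table, "state" column).
        op.execute("ALTER TYPE thing_state_enum RENAME TO old_thing_state_enum")
        op.execute("CREATE TYPE thing_state_enum AS ENUM ('active', 'archived')")
        op.execute("ALTER TABLE thing ALTER COLUMN state TYPE thing_state_enum "
                   "USING state::text::thing_state_enum")
        op.execute("DROP TYPE old_thing_state_enum")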
-revision = '34c517bcc2dd' -down_revision = 'ed9c6ddc5c35' -branch_labels = None -depends_on = None - - -resource_type_helper = sqlalchemy.Table( - 'resource_type', - sqlalchemy.MetaData(), - sqlalchemy.Column('tablename', sqlalchemy.String(18), nullable=False) -) - -to_rename = [ - ('fk_metric_archive_policy_name_archive_policy_name', - 'fk_metric_ap_name_ap_name', - 'archive_policy', 'name', - 'metric', 'archive_policy_name', - "RESTRICT"), - ('fk_resource_history_resource_type_name', - 'fk_rh_resource_type_name', - 'resource_type', 'name', 'resource_history', 'type', - "RESTRICT"), - ('fk_resource_history_id_resource_id', - 'fk_rh_id_resource_id', - 'resource', 'id', 'resource_history', 'id', - "CASCADE"), - ('fk_archive_policy_rule_archive_policy_name_archive_policy_name', - 'fk_apr_ap_name_ap_name', - 'archive_policy', 'name', 'archive_policy_rule', 'archive_policy_name', - "RESTRICT") -] - - -def upgrade(): - connection = op.get_bind() - - insp = sqlalchemy.inspect(connection) - - op.alter_column("resource_type", "tablename", - type_=sqlalchemy.String(35), - existing_type=sqlalchemy.String(18), nullable=False) - - for rt in connection.execute(resource_type_helper.select()): - if rt.tablename == "generic": - continue - - fk_names = [fk['name'] for fk in insp.get_foreign_keys("%s_history" % - rt.tablename)] - fk_old = ("fk_%s_history_resource_history_revision" % - rt.tablename) - if fk_old not in fk_names: - # The table have been created from scratch recently - fk_old = ("fk_%s_history_revision_resource_history_revision" % - rt.tablename) - - fk_new = "fk_%s_h_revision_rh_revision" % rt.tablename - to_rename.append((fk_old, fk_new, 'resource_history', 'revision', - "%s_history" % rt.tablename, 'revision', 'CASCADE')) - - for (fk_old, fk_new, src_table, src_col, dst_table, dst_col, ondelete - ) in to_rename: - op.drop_constraint(fk_old, dst_table, type_="foreignkey") - op.create_foreign_key(fk_new, dst_table, src_table, - [dst_col], [src_col], ondelete=ondelete) diff --git a/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py b/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py deleted file mode 100644 index 2c221f70c..000000000 --- a/gnocchi/indexer/alembic/versions/3901f5ea2b8e_create_instance_disk_and_instance_.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""create instance_disk and instance_net_int tables - -Revision ID: 3901f5ea2b8e -Revises: 42ee7f3e25f8 -Create Date: 2015-08-27 17:00:25.092891 - -""" - -# revision identifiers, used by Alembic. 
-revision = '3901f5ea2b8e' -down_revision = '42ee7f3e25f8' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -import sqlalchemy_utils - - -def upgrade(): - for table in ["resource", "resource_history"]: - op.alter_column(table, "type", - type_=sa.Enum('generic', 'instance', 'swift_account', - 'volume', 'ceph_account', 'network', - 'identity', 'ipmi', 'stack', 'image', - 'instance_network_interface', - 'instance_disk', - name='resource_type_enum'), - nullable=False) - - # NOTE(sileht): postgresql have a builtin ENUM type, so - # just altering the column won't works. - # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type - # Does it break offline migration because we use get_bind() ? - - # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction, - # we split the 'ALTER TYPE' operation into several steps. - bind = op.get_bind() - if bind and bind.engine.name == "postgresql": - op.execute("ALTER TYPE resource_type_enum RENAME TO \ - old_resource_type_enum") - op.execute("CREATE TYPE resource_type_enum AS ENUM \ - ('generic', 'instance', 'swift_account', \ - 'volume', 'ceph_account', 'network', \ - 'identity', 'ipmi', 'stack', 'image', \ - 'instance_network_interface', 'instance_disk')") - for table in ["resource", "resource_history"]: - op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \ - resource_type_enum USING \ - type::text::resource_type_enum" % table) - op.execute("DROP TYPE old_resource_type_enum") - - for table in ['instance_disk', 'instance_net_int']: - op.create_table( - table, - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('instance_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('name', sa.String(length=255), nullable=False), - sa.Index('ix_%s_id' % table, 'id', unique=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], - name="fk_%s_id_resource_id" % table, - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - - op.create_table( - '%s_history' % table, - sa.Column('instance_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('name', sa.String(length=255), nullable=False), - sa.Column('revision', sa.Integer(), nullable=False), - sa.Index('ix_%s_history_revision' % table, 'revision', - unique=False), - sa.ForeignKeyConstraint(['revision'], - ['resource_history.revision'], - name=("fk_%s_history_" - "resource_history_revision") % table, - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) diff --git a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py b/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py deleted file mode 100644 index 80b9416e3..000000000 --- a/gnocchi/indexer/alembic/versions/397987e38570_no_more_slash_and_reencode.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright 2017 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Remove slashes from original resource IDs, recompute their id with creator - -Revision ID: 397987e38570 -Revises: aba5a217ca9b -Create Date: 2017-01-11 16:32:40.421758 - -""" -import uuid - -from alembic import op -import six -import sqlalchemy as sa -import sqlalchemy_utils - -from gnocchi import utils - -# revision identifiers, used by Alembic. -revision = '397987e38570' -down_revision = 'aba5a217ca9b' -branch_labels = None -depends_on = None - -resource_type_table = sa.Table( - 'resource_type', - sa.MetaData(), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('tablename', sa.String(35), nullable=False) -) - -resource_table = sa.Table( - 'resource', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - sa.Column('original_resource_id', sa.String(255)), - sa.Column('type', sa.String(255)), - sa.Column('creator', sa.String(255)) -) - -resourcehistory_table = sa.Table( - 'resource_history', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - sa.Column('original_resource_id', sa.String(255)) -) - -metric_table = sa.Table( - 'metric', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - sa.Column('name', sa.String(255)), - sa.Column('resource_id', sqlalchemy_utils.types.uuid.UUIDType()) - -) - - -uuidtype = sqlalchemy_utils.types.uuid.UUIDType() - - -def upgrade(): - connection = op.get_bind() - - resource_type_tables = {} - resource_type_tablenames = dict( - (rt.name, rt.tablename) - for rt in connection.execute(resource_type_table.select()) - if rt.tablename != "generic" - ) - - op.drop_constraint("fk_metric_resource_id_resource_id", "metric", - type_="foreignkey") - for name, table in resource_type_tablenames.items(): - op.drop_constraint("fk_%s_id_resource_id" % table, table, - type_="foreignkey") - - resource_type_tables[name] = sa.Table( - table, - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(), - nullable=False), - ) - - for resource in connection.execute(resource_table.select()): - - if resource.original_resource_id is None: - # statsd resource has no original_resource_id and is NULL - continue - - try: - orig_as_uuid = uuid.UUID(str(resource.original_resource_id)) - except ValueError: - pass - else: - if orig_as_uuid == resource.id: - continue - - new_original_resource_id = resource.original_resource_id.replace( - '/', '_') - if six.PY2: - new_original_resource_id = new_original_resource_id.encode('utf-8') - new_id = sa.literal(uuidtype.process_bind_param( - str(utils.ResourceUUID( - new_original_resource_id, resource.creator)), - connection.dialect)) - - # resource table - connection.execute( - resource_table.update().where( - resource_table.c.id == resource.id - ).values( - id=new_id, - original_resource_id=new_original_resource_id - ) - ) - # resource history table - connection.execute( - resourcehistory_table.update().where( - resourcehistory_table.c.id == resource.id - ).values( - id=new_id, - original_resource_id=new_original_resource_id - ) - ) - - if resource.type != "generic": - rtable = resource_type_tables[resource.type] - - # resource table (type) - connection.execute( - rtable.update().where( - rtable.c.id == resource.id - ).values(id=new_id) - ) - - # Metric - connection.execute( - metric_table.update().where( - metric_table.c.resource_id == resource.id - ).values( - 
resource_id=new_id - ) - ) - - for (name, table) in resource_type_tablenames.items(): - op.create_foreign_key("fk_%s_id_resource_id" % table, - table, "resource", - ("id",), ("id",), - ondelete="CASCADE") - - op.create_foreign_key("fk_metric_resource_id_resource_id", - "metric", "resource", - ("resource_id",), ("id",), - ondelete="SET NULL") - - for metric in connection.execute(metric_table.select().where( - metric_table.c.name.like("%/%"))): - connection.execute( - metric_table.update().where( - metric_table.c.id == metric.id - ).values( - name=metric.name.replace('/', '_'), - ) - ) diff --git a/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py b/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py deleted file mode 100644 index c3d7be996..000000000 --- a/gnocchi/indexer/alembic/versions/39b7d449d46a_create_metric_status_column.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""create metric status column - -Revision ID: 39b7d449d46a -Revises: 3901f5ea2b8e -Create Date: 2015-09-16 13:25:34.249237 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. -revision = '39b7d449d46a' -down_revision = '3901f5ea2b8e' -branch_labels = None -depends_on = None - - -def upgrade(): - enum = sa.Enum("active", "delete", name="metric_status_enum") - enum.create(op.get_bind(), checkfirst=False) - op.add_column("metric", - sa.Column('status', enum, - nullable=False, - server_default="active")) - op.create_index('ix_metric_status', 'metric', ['status'], unique=False) - - op.drop_constraint("fk_metric_resource_id_resource_id", - "metric", type_="foreignkey") - op.create_foreign_key("fk_metric_resource_id_resource_id", - "metric", "resource", - ("resource_id",), ("id",), - ondelete="SET NULL") diff --git a/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py b/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py deleted file mode 100644 index cf6922c9b..000000000 --- a/gnocchi/indexer/alembic/versions/40c6aae14c3f_ck_started_before_ended.py +++ /dev/null @@ -1,39 +0,0 @@ -# -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""ck_started_before_ended - -Revision ID: 40c6aae14c3f -Revises: 1c98ac614015 -Create Date: 2015-04-28 16:35:11.999144 - -""" - -# revision identifiers, used by Alembic. 
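The id rewrite in 397987e38570 above only works because `utils.ResourceUUID` is deterministic: the same (original resource id, creator) pair always maps to the same UUID, so `resource`, `resource_history` and `metric` can each be updated independently and still agree. A sketch of that style of derivation using `uuid.uuid5` (the namespace constant and the separator below are made up, not Gnocchi's actual values):

    import uuid

    # Hypothetical namespace; a real implementation fixes one constant
    # forever, since changing it would re-map every resource id.
    NAMESPACE = uuid.UUID("7f83f776-bfa8-4fc8-a261-3d0b31ae7e53")

    def resource_uuid(original_id, creator):
        return uuid.uuid5(NAMESPACE, "%s\x00%s" % (original_id, creator))

    assert resource_uuid("vm-1", "alice") == resource_uuid("vm-1", "alice")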
-revision = '40c6aae14c3f' -down_revision = '1c98ac614015' -branch_labels = None -depends_on = None - -from alembic import op - - -def upgrade(): - op.create_check_constraint("ck_started_before_ended", - "resource", - "started_at <= ended_at") - op.create_check_constraint("ck_started_before_ended", - "resource_history", - "started_at <= ended_at") diff --git a/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py b/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py deleted file mode 100644 index e8d10d44f..000000000 --- a/gnocchi/indexer/alembic/versions/42ee7f3e25f8_alter_flavorid_from_int_to_string.py +++ /dev/null @@ -1,38 +0,0 @@ -# -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""alter flavorid from int to string - -Revision ID: 42ee7f3e25f8 -Revises: f7d44b47928 -Create Date: 2015-05-10 21:20:24.941263 - -""" - -# revision identifiers, used by Alembic. -revision = '42ee7f3e25f8' -down_revision = 'f7d44b47928' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa - - -def upgrade(): - for table in ('instance', 'instance_history'): - op.alter_column(table, "flavor_id", - type_=sa.String(length=255), - nullable=False) diff --git a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py b/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py deleted file mode 100644 index 5ac8dfcf7..000000000 --- a/gnocchi/indexer/alembic/versions/469b308577a9_allow_image_ref_to_be_null.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""allow image_ref to be null - -Revision ID: 469b308577a9 -Revises: 39b7d449d46a -Create Date: 2015-11-29 00:23:39.998256 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
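`ck_started_before_ended` above pushes a lifecycle invariant down into the schema instead of trusting application code to enforce it. Check constraints are one-liners in Alembic; a sketch against a hypothetical `booking` table:

    from alembic import op

    def upgrade():
        # Reject rows whose interval is inverted, at the database level.
        op.create_check_constraint("ck_started_before_ended", "booking",
                                   "started_at <= ended_at")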
-revision = '469b308577a9' -down_revision = '39b7d449d46a' -branch_labels = None -depends_on = None - - -def upgrade(): - op.alter_column('instance', 'image_ref', - existing_type=sa.String(length=255), - nullable=True) - op.alter_column('instance_history', 'image_ref', - existing_type=sa.String(length=255), - nullable=True) diff --git a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py b/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py deleted file mode 100644 index 824a3e93a..000000000 --- a/gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""mysql_float_to_timestamp - -Revision ID: 5c4f93e5bb4 -Revises: 7e6f9d542f8b -Create Date: 2016-07-25 15:36:36.469847 - -""" - -from alembic import op -import sqlalchemy as sa -from sqlalchemy.sql import func - -from gnocchi.indexer import sqlalchemy_base - -# revision identifiers, used by Alembic. -revision = '5c4f93e5bb4' -down_revision = '27d2a1d205ff' -branch_labels = None -depends_on = None - - -def upgrade(): - bind = op.get_bind() - if bind and bind.engine.name == "mysql": - op.execute("SET time_zone = '+00:00'") - # NOTE(jd) So that crappy engine that is MySQL does not have "ALTER - # TABLE … USING …". We need to copy everything and convert… - for table_name, column_name in (("resource", "started_at"), - ("resource", "ended_at"), - ("resource", "revision_start"), - ("resource_history", "started_at"), - ("resource_history", "ended_at"), - ("resource_history", "revision_start"), - ("resource_history", "revision_end"), - ("resource_type", "updated_at")): - - nullable = column_name == "ended_at" - - existing_type = sa.types.DECIMAL( - precision=20, scale=6, asdecimal=True) - existing_col = sa.Column( - column_name, - existing_type, - nullable=nullable) - temp_col = sa.Column( - column_name + "_ts", - sqlalchemy_base.TimestampUTC(), - nullable=True) - op.add_column(table_name, temp_col) - t = sa.sql.table(table_name, existing_col, temp_col) - op.execute(t.update().values( - **{column_name + "_ts": func.from_unixtime(existing_col)})) - op.drop_column(table_name, column_name) - op.alter_column(table_name, - column_name + "_ts", - nullable=nullable, - type_=sqlalchemy_base.TimestampUTC(), - existing_nullable=nullable, - existing_type=existing_type, - new_column_name=column_name) diff --git a/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py b/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py deleted file mode 100644 index 9dbb437c1..000000000 --- a/gnocchi/indexer/alembic/versions/62a8dfb139bb_change_uuid_to_string.py +++ /dev/null @@ -1,249 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Change uuid to string - -Revision ID: 62a8dfb139bb -Revises: 1f21cbdd6bc2 -Create Date: 2016-01-20 11:57:45.954607 - -""" - -from alembic import op -import sqlalchemy as sa -import sqlalchemy_utils - - -# revision identifiers, used by Alembic. -revision = '62a8dfb139bb' -down_revision = '1f21cbdd6bc2' -branch_labels = None -depends_on = None - -resourcehelper = sa.Table( - 'resource', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('tmp_created_by_user_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_created_by_project_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_user_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_project_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('created_by_user_id', - sa.String(length=255), - nullable=True), - sa.Column('created_by_project_id', - sa.String(length=255), - nullable=True), - sa.Column('user_id', - sa.String(length=255), - nullable=True), - sa.Column('project_id', - sa.String(length=255), - nullable=True), -) - -resourcehistoryhelper = sa.Table( - 'resource_history', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('tmp_created_by_user_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_created_by_project_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_user_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_project_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('created_by_user_id', - sa.String(length=255), - nullable=True), - sa.Column('created_by_project_id', - sa.String(length=255), - nullable=True), - sa.Column('user_id', - sa.String(length=255), - nullable=True), - sa.Column('project_id', - sa.String(length=255), - nullable=True), -) - -metrichelper = sa.Table( - 'metric', - sa.MetaData(), - sa.Column('id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('tmp_created_by_user_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('tmp_created_by_project_id', - sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=True), - sa.Column('created_by_user_id', - sa.String(length=255), - nullable=True), - sa.Column('created_by_project_id', - sa.String(length=255), - nullable=True), -) - - -def upgrade(): - connection = op.get_bind() - - # Rename user/project fields to tmp_* - op.alter_column('metric', 'created_by_project_id', - new_column_name='tmp_created_by_project_id', - existing_type=sa.BINARY(length=16)) - op.alter_column('metric', 'created_by_user_id', - new_column_name='tmp_created_by_user_id', - existing_type=sa.BINARY(length=16)) - op.alter_column('resource', 'created_by_project_id', - new_column_name='tmp_created_by_project_id', - existing_type=sa.BINARY(length=16)) - 
op.alter_column('resource', 'created_by_user_id',
-                    new_column_name='tmp_created_by_user_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource', 'project_id',
-                    new_column_name='tmp_project_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource', 'user_id',
-                    new_column_name='tmp_user_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource_history', 'created_by_project_id',
-                    new_column_name='tmp_created_by_project_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource_history', 'created_by_user_id',
-                    new_column_name='tmp_created_by_user_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource_history', 'project_id',
-                    new_column_name='tmp_project_id',
-                    existing_type=sa.BINARY(length=16))
-    op.alter_column('resource_history', 'user_id',
-                    new_column_name='tmp_user_id',
-                    existing_type=sa.BINARY(length=16))
-
-    # Add new user/project fields as strings
-    op.add_column('metric',
-                  sa.Column('created_by_project_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('metric',
-                  sa.Column('created_by_user_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource',
-                  sa.Column('created_by_project_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource',
-                  sa.Column('created_by_user_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource',
-                  sa.Column('project_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource',
-                  sa.Column('user_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource_history',
-                  sa.Column('created_by_project_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource_history',
-                  sa.Column('created_by_user_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource_history',
-                  sa.Column('project_id',
-                            sa.String(length=255), nullable=True))
-    op.add_column('resource_history',
-                  sa.Column('user_id',
-                            sa.String(length=255), nullable=True))
-
-    # Migrate data
-    for tablehelper in [resourcehelper, resourcehistoryhelper]:
-        for resource in connection.execute(tablehelper.select()):
-            if resource.tmp_created_by_project_id:
-                created_by_project_id = \
-                    str(resource.tmp_created_by_project_id).replace('-', '')
-            else:
-                created_by_project_id = None
-            if resource.tmp_created_by_user_id:
-                created_by_user_id = \
-                    str(resource.tmp_created_by_user_id).replace('-', '')
-            else:
-                created_by_user_id = None
-            if resource.tmp_project_id:
-                project_id = str(resource.tmp_project_id).replace('-', '')
-            else:
-                project_id = None
-            if resource.tmp_user_id:
-                user_id = str(resource.tmp_user_id).replace('-', '')
-            else:
-                user_id = None
-
-            connection.execute(
-                tablehelper.update().where(
-                    tablehelper.c.id == resource.id
-                ).values(
-                    created_by_project_id=created_by_project_id,
-                    created_by_user_id=created_by_user_id,
-                    project_id=project_id,
-                    user_id=user_id,
-                )
-            )
-    for metric in connection.execute(metrichelper.select()):
-        if metric.tmp_created_by_project_id:
-            created_by_project_id = \
-                str(metric.tmp_created_by_project_id).replace('-', '')
-        else:
-            created_by_project_id = None
-        if metric.tmp_created_by_user_id:
-            created_by_user_id = \
-                str(metric.tmp_created_by_user_id).replace('-', '')
-        else:
-            created_by_user_id = None
-        connection.execute(
-            metrichelper.update().where(
-                metrichelper.c.id == metric.id
-            ).values(
-                created_by_project_id=created_by_project_id,
-                created_by_user_id=created_by_user_id,
-            )
-        )
-
-    # Delete temp fields
-    op.drop_column('metric',
'tmp_created_by_project_id') - op.drop_column('metric', 'tmp_created_by_user_id') - op.drop_column('resource', 'tmp_created_by_project_id') - op.drop_column('resource', 'tmp_created_by_user_id') - op.drop_column('resource', 'tmp_project_id') - op.drop_column('resource', 'tmp_user_id') - op.drop_column('resource_history', 'tmp_created_by_project_id') - op.drop_column('resource_history', 'tmp_created_by_user_id') - op.drop_column('resource_history', 'tmp_project_id') - op.drop_column('resource_history', 'tmp_user_id') diff --git a/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py b/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py deleted file mode 100644 index 9b3a88ff9..000000000 --- a/gnocchi/indexer/alembic/versions/7e6f9d542f8b_resource_type_state_column.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""resource_type state column - -Revision ID: 7e6f9d542f8b -Revises: c62df18bf4ee -Create Date: 2016-05-19 16:52:58.939088 - -""" - -from alembic import op -import sqlalchemy as sa - -# revision identifiers, used by Alembic. -revision = '7e6f9d542f8b' -down_revision = 'c62df18bf4ee' -branch_labels = None -depends_on = None - - -def upgrade(): - states = ("active", "creating", "creation_error", "deleting", - "deletion_error") - enum = sa.Enum(*states, name="resource_type_state_enum") - enum.create(op.get_bind(), checkfirst=False) - op.add_column("resource_type", - sa.Column('state', enum, nullable=False, - server_default="creating")) - rt = sa.sql.table('resource_type', sa.sql.column('state', enum)) - op.execute(rt.update().values(state="active")) diff --git a/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py b/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py deleted file mode 100644 index c95d2684b..000000000 --- a/gnocchi/indexer/alembic/versions/828c16f70cce_create_resource_type_table.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""create resource_type table - -Revision ID: 828c16f70cce -Revises: 9901e5ea4b6e -Create Date: 2016-01-19 12:47:19.384127 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
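A pattern worth noting across these data fix-ups: they never import the ORM models. Each migration declares a throwaway `sa.sql.table()` carrying only the columns it touches, so the script keeps working no matter how the real models evolve later. A minimal add-then-backfill-then-tighten sketch with hypothetical names:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.add_column("widget",
                      sa.Column("state", sa.String(16), nullable=True))
        # Lightweight table: just enough schema to build the UPDATE.
        widget = sa.sql.table("widget", sa.sql.column("state"))
        op.execute(widget.update().values(state="active"))
        op.alter_column("widget", "state", existing_type=sa.String(16),
                        nullable=False)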
-revision = '828c16f70cce' -down_revision = '9901e5ea4b6e' -branch_labels = None -depends_on = None - - -type_string = sa.String(255) -type_enum = sa.Enum('generic', 'instance', - 'swift_account', 'volume', - 'ceph_account', 'network', - 'identity', 'ipmi', 'stack', - 'image', 'instance_disk', - 'instance_network_interface', - 'host', 'host_disk', - 'host_network_interface', - name="resource_type_enum") - - -def type_string_col(name, table): - return sa.Column( - name, type_string, - sa.ForeignKey('resource_type.name', - ondelete="RESTRICT", - name="fk_%s_resource_type_name" % table)) - - -def type_enum_col(name): - return sa.Column(name, type_enum, - nullable=False, default='generic') - - -def upgrade(): - resource_type = op.create_table( - 'resource_type', - sa.Column('name', sa.String(length=255), nullable=False), - sa.PrimaryKeyConstraint('name'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - - resource = sa.Table('resource', sa.MetaData(), - type_string_col("type", "resource")) - op.execute(resource_type.insert().from_select( - ['name'], sa.select([resource.c.type]).distinct())) - - for table in ["resource", "resource_history"]: - op.alter_column(table, "type", new_column_name="old_type", - existing_type=type_enum) - op.add_column(table, type_string_col("type", table)) - sa_table = sa.Table(table, sa.MetaData(), - type_string_col("type", table), - type_enum_col('old_type')) - op.execute(sa_table.update().values( - {sa_table.c.type: sa_table.c.old_type})) - op.drop_column(table, "old_type") - op.alter_column(table, "type", nullable=False, - existing_type=type_string) diff --git a/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py b/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py deleted file mode 100644 index f1a83bd40..000000000 --- a/gnocchi/indexer/alembic/versions/8f376189b9eb_migrate_legacy_resources_to_db.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""Migrate legacy resources to DB - -Revision ID: 8f376189b9eb -Revises: d24877c22ab0 -Create Date: 2016-01-20 15:03:28.115656 - -""" -import json - -from alembic import op -import sqlalchemy as sa - -from gnocchi.indexer import sqlalchemy_legacy_resources as legacy - -# revision identifiers, used by Alembic. 
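The resource_type creation above seeds the new lookup table straight from the values already in use via `insert().from_select()`, so no data round-trips through Python. Reduced to hypothetical `kind`/`item` tables (the `sa.select([...])` list form is the SQLAlchemy 1.x idiom used throughout this series):

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        kind = op.create_table(
            "kind",
            sa.Column("name", sa.String(length=255), nullable=False),
            sa.PrimaryKeyConstraint("name"))
        item = sa.sql.table("item", sa.sql.column("type", sa.String(255)))
        # Populate the lookup table from the distinct values in use.
        op.execute(kind.insert().from_select(
            ["name"], sa.select([item.c.type]).distinct()))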
-revision = '8f376189b9eb' -down_revision = 'd24877c22ab0' -branch_labels = None -depends_on = None - - -def upgrade(): - resource_type = sa.Table( - 'resource_type', sa.MetaData(), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('attributes', sa.Text, nullable=False) - ) - - for name, attributes in legacy.ceilometer_resources.items(): - text_attributes = json.dumps(attributes) - op.execute(resource_type.update().where( - resource_type.c.name == name - ).values({resource_type.c.attributes: text_attributes})) diff --git a/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py b/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py deleted file mode 100644 index 901e6f8f9..000000000 --- a/gnocchi/indexer/alembic/versions/9901e5ea4b6e_create_host.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""create host tables - -Revision ID: 9901e5ea4b6e -Revises: a54c57ada3f5 -Create Date: 2015-12-15 17:20:25.092891 - -""" - -# revision identifiers, used by Alembic. -revision = '9901e5ea4b6e' -down_revision = 'a54c57ada3f5' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy as sa -import sqlalchemy_utils - - -def upgrade(): - for table in ["resource", "resource_history"]: - op.alter_column(table, "type", - type_=sa.Enum('generic', 'instance', 'swift_account', - 'volume', 'ceph_account', 'network', - 'identity', 'ipmi', 'stack', 'image', - 'instance_network_interface', - 'instance_disk', - 'host', 'host_disk', - 'host_network_interface', - name='resource_type_enum'), - nullable=False) - - # NOTE(sileht): postgresql have a builtin ENUM type, so - # just altering the column won't works. - # https://bitbucket.org/zzzeek/alembic/issues/270/altering-enum-type - # Does it break offline migration because we use get_bind() ? - - # NOTE(luogangyi): since we cannot use 'ALTER TYPE' in transaction, - # we split the 'ALTER TYPE' operation into several steps. 
- bind = op.get_bind() - if bind and bind.engine.name == "postgresql": - op.execute("ALTER TYPE resource_type_enum RENAME TO \ - old_resource_type_enum") - op.execute("CREATE TYPE resource_type_enum AS ENUM \ - ('generic', 'instance', 'swift_account', \ - 'volume', 'ceph_account', 'network', \ - 'identity', 'ipmi', 'stack', 'image', \ - 'instance_network_interface', 'instance_disk', \ - 'host', 'host_disk', \ - 'host_network_interface')") - for table in ["resource", "resource_history"]: - op.execute("ALTER TABLE %s ALTER COLUMN type TYPE \ - resource_type_enum USING \ - type::text::resource_type_enum" % table) - op.execute("DROP TYPE old_resource_type_enum") - - op.create_table( - 'host', - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('host_name', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['id'], ['resource.id'], - name="fk_hypervisor_id_resource_id", - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - - op.create_table( - 'host_history', - sa.Column('host_name', sa.String(length=255), nullable=False), - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], - ['resource_history.revision'], - name=("fk_hypervisor_history_" - "resource_history_revision"), - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - - for table in ['host_disk', 'host_net_int']: - op.create_table( - table, - sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False), - sa.Column('host_name', sa.String(length=255), nullable=False), - sa.Column('device_name', sa.String(length=255), nullable=True), - sa.ForeignKeyConstraint(['id'], ['resource.id'], - name="fk_%s_id_resource_id" % table, - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('id'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) - - op.create_table( - '%s_history' % table, - sa.Column('host_name', sa.String(length=255), nullable=False), - sa.Column('device_name', sa.String(length=255), nullable=True), - sa.Column('revision', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['revision'], - ['resource_history.revision'], - name=("fk_%s_history_" - "resource_history_revision") % table, - ondelete='CASCADE'), - sa.PrimaryKeyConstraint('revision'), - mysql_charset='utf8', - mysql_engine='InnoDB' - ) diff --git a/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py b/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py deleted file mode 100644 index b979857ad..000000000 --- a/gnocchi/indexer/alembic/versions/a54c57ada3f5_removes_useless_indexes.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-# - -"""merges primarykey and indexes - -Revision ID: a54c57ada3f5 -Revises: 1c2c61ac1f4c -Create Date: 2016-02-04 09:09:23.180955 - -""" - -from alembic import op - - -# revision identifiers, used by Alembic. -revision = 'a54c57ada3f5' -down_revision = '1c2c61ac1f4c' -branch_labels = None -depends_on = None - -resource_tables = [(t, "id") for t in [ - "instance", - "instance_disk", - "instance_net_int", - "swift_account", - "volume", - "ceph_account", - "network", - "identity", - "ipmi", - "stack", - "image" -]] -history_tables = [("%s_history" % t, "revision") - for t, c in resource_tables] -other_tables = [("metric", "id"), ("archive_policy", "name"), - ("archive_policy_rule", "name"), - ("resource", "id"), - ("resource_history", "id")] - - -def upgrade(): - bind = op.get_bind() - # NOTE(sileht): mysql can't delete an index on a foreign key - # even this one is not the index used by the foreign key itself... - # In our case we have two indexes fk_resource_history_id_resource_id and - # and ix_resource_history_id, we want to delete only the second, but mysql - # can't do that with a simple DROP INDEX ix_resource_history_id... - # so we have to remove the constraint and put it back... - if bind.engine.name == "mysql": - op.drop_constraint("fk_resource_history_id_resource_id", - type_="foreignkey", table_name="resource_history") - - for table, colname in resource_tables + history_tables + other_tables: - op.drop_index("ix_%s_%s" % (table, colname), table_name=table) - - if bind.engine.name == "mysql": - op.create_foreign_key("fk_resource_history_id_resource_id", - "resource_history", "resource", ["id"], ["id"], - ondelete="CASCADE") diff --git a/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py b/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py deleted file mode 100644 index 72339057b..000000000 --- a/gnocchi/indexer/alembic/versions/aba5a217ca9b_merge_created_in_creator.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""merge_created_in_creator - -Revision ID: aba5a217ca9b -Revises: 5c4f93e5bb4 -Create Date: 2016-12-06 17:40:25.344578 - -""" - -from alembic import op -import sqlalchemy as sa - - -# revision identifiers, used by Alembic. 
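The index cleanup above works around the MySQL quirk called out in its comment: an index cannot be dropped while a foreign key may still rely on it, so the constraint comes off first and goes back afterwards. Isolated to a hypothetical parent/child pair:

    from alembic import op

    def upgrade():
        op.drop_constraint("fk_child_parent_id_parent_id", "child",
                           type_="foreignkey")
        # With the FK detached, MySQL accepts dropping the redundant index.
        op.drop_index("ix_child_parent_id", table_name="child")
        op.create_foreign_key("fk_child_parent_id_parent_id", "child",
                              "parent", ["parent_id"], ["id"],
                              ondelete="CASCADE")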
-revision = 'aba5a217ca9b'
-down_revision = '5c4f93e5bb4'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    for table_name in ("resource", "resource_history", "metric"):
-        creator_col = sa.Column("creator", sa.String(255))
-        created_by_user_id_col = sa.Column("created_by_user_id",
-                                           sa.String(255))
-        created_by_project_id_col = sa.Column("created_by_project_id",
-                                              sa.String(255))
-        op.add_column(table_name, creator_col)
-        t = sa.sql.table(
-            table_name, creator_col,
-            created_by_user_id_col, created_by_project_id_col)
-        op.execute(
-            t.update().values(
-                creator=(
-                    created_by_user_id_col + ":" + created_by_project_id_col
-                )).where(created_by_user_id_col.isnot(None)
-                         | created_by_project_id_col.isnot(None)))
-        op.drop_column(table_name, "created_by_user_id")
-        op.drop_column(table_name, "created_by_project_id")
diff --git a/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py b/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py
deleted file mode 100644
index 7d4deef59..000000000
--- a/gnocchi/indexer/alembic/versions/c62df18bf4ee_add_unit_column_for_metric.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2016 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""add unit column for metric
-
-Revision ID: c62df18bf4ee
-Revises: 2e0b912062d1
-Create Date: 2016-05-04 12:31:25.350190
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-
-
-# revision identifiers, used by Alembic.
-revision = 'c62df18bf4ee'
-down_revision = '2e0b912062d1'
-branch_labels = None
-depends_on = None
-
-
-def upgrade():
-    op.add_column('metric', sa.Column('unit',
-                                      sa.String(length=31),
-                                      nullable=True))
diff --git a/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py b/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py
deleted file mode 100644
index dda81e504..000000000
--- a/gnocchi/indexer/alembic/versions/d24877c22ab0_add_attributes_to_resource_type.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2016 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-"""Add attributes to resource_type
-
-Revision ID: d24877c22ab0
-Revises: 0718ed97e5b3
-Create Date: 2016-01-19 22:45:06.431190
-
-"""
-
-from alembic import op
-import sqlalchemy as sa
-import sqlalchemy_utils as sa_utils
-
-
-# revision identifiers, used by Alembic.
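Note the WHERE clause in the creator merge above: NULL-ness on a column expression must be spelled `isnot(None)` so it compiles to SQL `IS NOT NULL`; a bare Python `is not None` comparison evaluates to a boolean before SQLAlchemy ever sees it. A standalone sketch against a hypothetical `acct` table, printing the rendered statement:

    import sqlalchemy as sa

    t = sa.sql.table("acct",
                     sa.sql.column("creator", sa.String(255)),
                     sa.sql.column("user_id", sa.String(255)),
                     sa.sql.column("project_id", sa.String(255)))
    stmt = t.update().values(
        creator=t.c.user_id + ":" + t.c.project_id
    ).where(t.c.user_id.isnot(None) | t.c.project_id.isnot(None))
    print(stmt)  # UPDATE acct SET creator=... WHERE ... IS NOT NULL OR ...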
-revision = 'd24877c22ab0' -down_revision = '0718ed97e5b3' -branch_labels = None -depends_on = None - - -def upgrade(): - op.add_column("resource_type", - sa.Column('attributes', sa_utils.JSONType(),)) diff --git a/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py b/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py deleted file mode 100644 index e5cfdd02b..000000000 --- a/gnocchi/indexer/alembic/versions/ed9c6ddc5c35_fix_host_foreign_key.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""fix_host_foreign_key - -Revision ID: ed9c6ddc5c35 -Revises: ffc7bbeec0b0 -Create Date: 2016-04-15 06:25:34.649934 - -""" - -from alembic import op -from sqlalchemy import inspect - -# revision identifiers, used by Alembic. -revision = 'ed9c6ddc5c35' -down_revision = 'ffc7bbeec0b0' -branch_labels = None -depends_on = None - - -def upgrade(): - conn = op.get_bind() - - insp = inspect(conn) - fk_names = [fk['name'] for fk in insp.get_foreign_keys('host')] - if ("fk_hypervisor_id_resource_id" not in fk_names and - "fk_host_id_resource_id" in fk_names): - # NOTE(sileht): we are already good, the BD have been created from - # scratch after "a54c57ada3f5" - return - - op.drop_constraint("fk_hypervisor_id_resource_id", "host", - type_="foreignkey") - op.drop_constraint("fk_hypervisor_history_resource_history_revision", - "host_history", type_="foreignkey") - op.create_foreign_key("fk_host_id_resource_id", "host", "resource", - ["id"], ["id"], ondelete="CASCADE") - op.create_foreign_key("fk_host_history_resource_history_revision", - "host_history", "resource_history", - ["revision"], ["revision"], ondelete="CASCADE") diff --git a/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py b/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py deleted file mode 100644 index c53c725df..000000000 --- a/gnocchi/indexer/alembic/versions/f7d44b47928_uuid_to_binary.py +++ /dev/null @@ -1,89 +0,0 @@ -# -# Copyright 2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""uuid_to_binary - -Revision ID: f7d44b47928 -Revises: 40c6aae14c3f -Create Date: 2015-04-30 13:29:29.074794 - -""" - -# revision identifiers, used by Alembic. 
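`fix_host_foreign_key` above illustrates the defensive style this series needs once deployed databases may have been created at different schema versions: inspect the live constraint names and only rename when the legacy name is actually present. The guard, reduced to hypothetical names:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        insp = sa.inspect(op.get_bind())
        fk_names = [fk["name"] for fk in insp.get_foreign_keys("child")]
        if "fk_child_legacy" not in fk_names:
            # Schema was created from scratch after the rename; nothing to do.
            return
        op.drop_constraint("fk_child_legacy", "child", type_="foreignkey")
        op.create_foreign_key("fk_child_id_parent_id", "child", "parent",
                              ["id"], ["id"], ondelete="CASCADE")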
-revision = 'f7d44b47928' -down_revision = '40c6aae14c3f' -branch_labels = None -depends_on = None - -from alembic import op -import sqlalchemy_utils.types.uuid - - -def upgrade(): - op.alter_column("metric", "id", - type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False) - - for table in ('resource', 'resource_history', 'metric'): - op.alter_column(table, "created_by_user_id", - type_=sqlalchemy_utils.types.uuid.UUIDType( - binary=True)) - op.alter_column(table, "created_by_project_id", - type_=sqlalchemy_utils.types.uuid.UUIDType( - binary=True)) - for table in ('resource', 'resource_history'): - op.alter_column(table, "user_id", - type_=sqlalchemy_utils.types.uuid.UUIDType( - binary=True)) - op.alter_column(table, "project_id", - type_=sqlalchemy_utils.types.uuid.UUIDType( - binary=True)) - - # Drop all foreign keys linking to resource.id - for table in ('ceph_account', 'identity', 'volume', 'swift_account', - 'ipmi', 'image', 'network', 'stack', 'instance', - 'resource_history'): - op.drop_constraint("fk_%s_id_resource_id" % table, table, - type_="foreignkey") - - op.drop_constraint("fk_metric_resource_id_resource_id", "metric", - type_="foreignkey") - - # Now change the type of resource.id - op.alter_column("resource", "id", - type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False) - - # Now change all the types of $table.id and re-add the FK - for table in ('ceph_account', 'identity', 'volume', 'swift_account', - 'ipmi', 'image', 'network', 'stack', 'instance', - 'resource_history'): - op.alter_column( - table, "id", - type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True), - nullable=False) - - op.create_foreign_key("fk_%s_id_resource_id" % table, - table, "resource", - ("id",), ("id",), - ondelete="CASCADE") - - op.alter_column("metric", "resource_id", - type_=sqlalchemy_utils.types.uuid.UUIDType(binary=True)) - - op.create_foreign_key("fk_metric_resource_id_resource_id", - "metric", "resource", - ("resource_id",), ("id",), - ondelete="CASCADE") diff --git a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py b/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py deleted file mode 100644 index 1be98151d..000000000 --- a/gnocchi/indexer/alembic/versions/ffc7bbeec0b0_migrate_legacy_resources_to_db2.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2016 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -"""migrate_legacy_resources_to_db2 - -Revision ID: ffc7bbeec0b0 -Revises: 8f376189b9eb -Create Date: 2016-04-14 15:57:13.072128 - -""" -import json - -from alembic import op -import sqlalchemy as sa - -from gnocchi.indexer import sqlalchemy_legacy_resources as legacy - -# revision identifiers, used by Alembic. 
-revision = 'ffc7bbeec0b0' -down_revision = '8f376189b9eb' -branch_labels = None -depends_on = None - - -def upgrade(): - bind = op.get_bind() - - resource_type = sa.Table( - 'resource_type', sa.MetaData(), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('tablename', sa.String(18), nullable=False), - sa.Column('attributes', sa.Text, nullable=False) - ) - - # NOTE(gordc): fix for incorrect migration: - # 0718ed97e5b3_add_tablename_to_resource_type.py#L46 - op.execute(resource_type.update().where( - resource_type.c.name == "instance_network_interface" - ).values({'tablename': 'instance_net_int'})) - - resource_type_names = [rt.name for rt in - list(bind.execute(resource_type.select()))] - - for name, attributes in legacy.ceilometer_resources.items(): - if name in resource_type_names: - continue - tablename = legacy.ceilometer_tablenames.get(name, name) - text_attributes = json.dumps(attributes) - op.execute(resource_type.insert().values({ - resource_type.c.attributes: text_attributes, - resource_type.c.name: name, - resource_type.c.tablename: tablename, - })) diff --git a/gnocchi/indexer/sqlalchemy.py b/gnocchi/indexer/sqlalchemy.py deleted file mode 100644 index 3497b52d6..000000000 --- a/gnocchi/indexer/sqlalchemy.py +++ /dev/null @@ -1,1235 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
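The indexer module whose deletion begins here leans on a single retry pattern throughout: database writes are wrapped with oslo.db's wrap_db_retry and a custom exception checker. As a minimal sketch of that pattern only (the _checker and _write_row names below are illustrative, not part of the deleted code):

    import oslo_db.api
    from oslo_db import exception


    def _checker(exc):
        # Retry only oslo.db DBError wrappers; the real checker,
        # _retry_on_exceptions() below, also inspects the driver-specific
        # inner exception (pymysql TABLE_DEF_CHANGED, psycopg2 25P02).
        return isinstance(exc, exception.DBError)


    @oslo_db.api.wrap_db_retry(retry_on_deadlock=True, max_retries=20,
                               retry_interval=0.1, max_retry_interval=2,
                               exception_checker=_checker)
    def _write_row(session, row):
        # Each failed attempt is retried with an increasing delay.
        session.add(row)

The keyword arguments mirror the retry_on_deadlock() helper in the deleted module.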
-from __future__ import absolute_import -import itertools -import operator -import os.path -import threading -import uuid - -from alembic import migration -from alembic import operations -import oslo_db.api -from oslo_db import exception -from oslo_db.sqlalchemy import enginefacade -from oslo_db.sqlalchemy import utils as oslo_db_utils -from oslo_log import log -try: - import psycopg2 -except ImportError: - psycopg2 = None -try: - import pymysql.constants.ER - import pymysql.err -except ImportError: - pymysql = None -import six -import sqlalchemy -from sqlalchemy.engine import url as sqlalchemy_url -import sqlalchemy.exc -from sqlalchemy import types -import sqlalchemy_utils - -from gnocchi import exceptions -from gnocchi import indexer -from gnocchi.indexer import sqlalchemy_base as base -from gnocchi import resource_type -from gnocchi import utils - -Base = base.Base -Metric = base.Metric -ArchivePolicy = base.ArchivePolicy -ArchivePolicyRule = base.ArchivePolicyRule -Resource = base.Resource -ResourceHistory = base.ResourceHistory -ResourceType = base.ResourceType - -_marker = indexer._marker - -LOG = log.getLogger(__name__) - - -def _retry_on_exceptions(exc): - if not isinstance(exc, exception.DBError): - return False - inn_e = exc.inner_exception - if not isinstance(inn_e, sqlalchemy.exc.InternalError): - return False - return (( - pymysql and - isinstance(inn_e.orig, pymysql.err.InternalError) and - (inn_e.orig.args[0] == pymysql.constants.ER.TABLE_DEF_CHANGED) - ) or ( - # HACK(jd) Sometimes, PostgreSQL raises an error such as "current - # transaction is aborted, commands ignored until end of transaction - # block" on its own catalog, so we need to retry, but this is not - # caught by oslo.db as a deadlock. This is likely because when we use - # Base.metadata.create_all(), sqlalchemy itself gets an error it does - # not catch or something. So this is why this function exists. To - # paperover I guess. - psycopg2 - and isinstance(inn_e.orig, psycopg2.InternalError) - # current transaction is aborted - and inn_e.orig.pgcode == '25P02' - )) - - -def retry_on_deadlock(f): - return oslo_db.api.wrap_db_retry(retry_on_deadlock=True, - max_retries=20, - retry_interval=0.1, - max_retry_interval=2, - exception_checker=_retry_on_exceptions)(f) - - -class PerInstanceFacade(object): - def __init__(self, conf): - self.trans = enginefacade.transaction_context() - self.trans.configure( - **dict(conf.database.items()) - ) - self._context = threading.local() - - def independent_writer(self): - return self.trans.independent.writer.using(self._context) - - def independent_reader(self): - return self.trans.independent.reader.using(self._context) - - def writer_connection(self): - return self.trans.connection.writer.using(self._context) - - def reader_connection(self): - return self.trans.connection.reader.using(self._context) - - def writer(self): - return self.trans.writer.using(self._context) - - def reader(self): - return self.trans.reader.using(self._context) - - def get_engine(self): - # TODO(mbayer): add get_engine() to enginefacade - if not self.trans._factory._started: - self.trans._factory._start() - return self.trans._factory._writer_engine - - def dispose(self): - # TODO(mbayer): add dispose() to enginefacade - if self.trans._factory._started: - self.trans._factory._writer_engine.dispose() - - -class ResourceClassMapper(object): - def __init__(self): - # FIXME(sileht): 3 attributes, perhaps we need a better structure. 
-        self._cache = {'generic': {'resource': base.Resource,
-                                   'history': base.ResourceHistory,
-                                   'updated_at': utils.utcnow()}}
-
-    @staticmethod
-    def _build_class_mappers(resource_type, baseclass=None):
-        tablename = resource_type.tablename
-        tables_args = {"extend_existing": True}
-        tables_args.update(base.COMMON_TABLES_ARGS)
-        # TODO(sileht): Add columns
-        if not baseclass:
-            baseclass = resource_type.to_baseclass()
-        resource_ext = type(
-            str("%s_resource" % tablename),
-            (baseclass, base.ResourceExtMixin, base.Resource),
-            {"__tablename__": tablename, "__table_args__": tables_args})
-        resource_history_ext = type(
-            str("%s_history" % tablename),
-            (baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory),
-            {"__tablename__": ("%s_history" % tablename),
-             "__table_args__": tables_args})
-        return {'resource': resource_ext,
-                'history': resource_history_ext,
-                'updated_at': resource_type.updated_at}
-
-    def get_classes(self, resource_type):
-        # NOTE(sileht): We don't care about concurrency here because we allow
-        # sqlalchemy to override its global object with extend_existing=True;
-        # this is safe because the classname and tablename are uuid-based.
-        try:
-            mappers = self._cache[resource_type.tablename]
-            # Cache is outdated
-            if (resource_type.name != "generic"
-                    and resource_type.updated_at > mappers['updated_at']):
-                for table_purpose in ['resource', 'history']:
-                    Base.metadata.remove(Base.metadata.tables[
-                        mappers[table_purpose].__tablename__])
-                del self._cache[resource_type.tablename]
-                raise KeyError
-            return mappers
-        except KeyError:
-            mapper = self._build_class_mappers(resource_type)
-            self._cache[resource_type.tablename] = mapper
-            return mapper
-
-    @retry_on_deadlock
-    def map_and_create_tables(self, resource_type, facade):
-        if resource_type.state != "creating":
-            raise RuntimeError("map_and_create_tables must be called in state "
-                               "creating")
-
-        mappers = self.get_classes(resource_type)
-        tables = [Base.metadata.tables[mappers["resource"].__tablename__],
-                  Base.metadata.tables[mappers["history"].__tablename__]]
-
-        with facade.writer_connection() as connection:
-            Base.metadata.create_all(connection, tables=tables)
-
-        # NOTE(sileht): no need to protect the _cache with a lock;
-        # get_classes cannot be called in state "creating"
-        self._cache[resource_type.tablename] = mappers
-
-    @retry_on_deadlock
-    def unmap_and_delete_tables(self, resource_type, facade):
-        if resource_type.state != "deleting":
-            raise RuntimeError("unmap_and_delete_tables must be called in "
-                               "state deleting")
-
-        mappers = self.get_classes(resource_type)
-        del self._cache[resource_type.tablename]
-
-        tables = [Base.metadata.tables[mappers['resource'].__tablename__],
-                  Base.metadata.tables[mappers['history'].__tablename__]]
-
-        # NOTE(sileht): Base.metadata.drop_all doesn't
-        # issue CASCADE correctly, at least on postgresql.
-        # We drop foreign keys manually to not lock the destination
-        # table for too long during drop table.
-        # It's safe to not use a transaction since
-        # the resource_type table is already cleaned and committed,
-        # so this code cannot be triggered anymore for this
-        # resource_type
-        with facade.writer_connection() as connection:
-            for table in tables:
-                for fk in table.foreign_key_constraints:
-                    try:
-                        self._safe_execute(
-                            connection,
-                            sqlalchemy.schema.DropConstraint(fk))
-                    except exception.DBNonExistentConstraint:
-                        pass
-            for table in tables:
-                try:
-                    self._safe_execute(connection,
-                                       sqlalchemy.schema.DropTable(table))
-                except exception.DBNonExistentTable:
-                    pass
-
-        # NOTE(sileht): If something goes wrong here, we cannot recover
-        # automatically; that is why we expose the state to the superuser.
-        # But we do allow deleting a resource type in an error state, in
-        # case the superuser cleans up the mess manually and wants gnocchi
-        # to take over and finish the cleanup.
-
-        # TODO(sileht): Remove this resource on other workers
-        # by using expiration on the cache?
-        for table in tables:
-            Base.metadata.remove(table)
-
-    @retry_on_deadlock
-    def _safe_execute(self, connection, works):
-        # NOTE(sileht): we create a transaction to ensure mysql
-        # creates locks on other transactions...
-        trans = connection.begin()
-        connection.execute(works)
-        trans.commit()
-
-
-class SQLAlchemyIndexer(indexer.IndexerDriver):
-    _RESOURCE_TYPE_MANAGER = ResourceClassMapper()
-
-    @classmethod
-    def _create_new_database(cls, url):
-        """Used by testing to create a new database."""
-        purl = sqlalchemy_url.make_url(
-            cls.dress_url(
-                url))
-        purl.database = purl.database + str(uuid.uuid4()).replace('-', '')
-        new_url = str(purl)
-        sqlalchemy_utils.create_database(new_url)
-        return new_url
-
-    @staticmethod
-    def dress_url(url):
-        # If no explicit driver has been set, we default to pymysql
-        if url.startswith("mysql://"):
-            url = sqlalchemy_url.make_url(url)
-            url.drivername = "mysql+pymysql"
-            return str(url)
-        return url
-
-    def __init__(self, conf):
-        conf.set_override("connection",
-                          self.dress_url(conf.indexer.url),
-                          "database")
-        self.conf = conf
-        self.facade = PerInstanceFacade(conf)
-
-    def disconnect(self):
-        self.facade.dispose()
-
-    def _get_alembic_config(self):
-        from alembic import config
-
-        cfg = config.Config(
-            "%s/alembic/alembic.ini" % os.path.dirname(__file__))
-        cfg.set_main_option('sqlalchemy.url',
-                            self.conf.database.connection)
-        return cfg
-
-    def get_engine(self):
-        return self.facade.get_engine()
-
-    def upgrade(self, nocreate=False):
-        from alembic import command
-        from alembic import migration
-
-        cfg = self._get_alembic_config()
-        cfg.conf = self.conf
-        if nocreate:
-            command.upgrade(cfg, "head")
-        else:
-            with self.facade.writer_connection() as connection:
-                ctxt = migration.MigrationContext.configure(connection)
-                current_version = ctxt.get_current_revision()
-                if current_version is None:
-                    Base.metadata.create_all(connection)
-                    command.stamp(cfg, "head")
-                else:
-                    command.upgrade(cfg, "head")
-
-        try:
-            with self.facade.writer() as session:
-                session.add(
-                    ResourceType(
-                        name="generic",
-                        tablename="generic",
-                        state="active",
-                        attributes=resource_type.ResourceTypeAttributes()))
-        except exception.DBDuplicateEntry:
-            pass
-
-    # NOTE(jd) We can have deadlock errors either here or later in
-    # map_and_create_tables(). We can't decorate create_resource_type()
-    # directly or each part might retry later on its own and cause a
-    # duplicate. And it seems there's no way to use the same session for
-    # both adding the resource_type in our table and calling
-    # map_and_create_tables() :-(
-    @retry_on_deadlock
-    def _add_resource_type(self, resource_type):
-        try:
-            with self.facade.writer() as session:
-                session.add(resource_type)
-        except exception.DBDuplicateEntry:
-            raise indexer.ResourceTypeAlreadyExists(resource_type.name)
-
-    def create_resource_type(self, resource_type):
-        # NOTE(sileht): mysql has a stupidly small length limitation on the
-        # foreign key and index name, so we can't use the resource type name
-        # as tablename; the limit is 64. The longest name we have is
-        # fk__h_revision_rh_revision,
-        # so 64 - 26 = 38 and 3 chars for rt_, 35 chars, uuid is 32, it's cool.
-        tablename = "rt_%s" % uuid.uuid4().hex
-        resource_type = ResourceType(name=resource_type.name,
-                                     tablename=tablename,
-                                     attributes=resource_type.attributes,
-                                     state="creating")
-
-        # NOTE(sileht): ensure the driver is able to store the requested
-        # resource_type
-        resource_type.to_baseclass()
-
-        self._add_resource_type(resource_type)
-
-        try:
-            self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type,
-                                                              self.facade)
-        except Exception:
-            # NOTE(sileht): We fail the DDL, we have no way to automatically
-            # recover, just set a particular state
-            self._set_resource_type_state(resource_type.name, "creation_error")
-            raise
-
-        self._set_resource_type_state(resource_type.name, "active")
-        resource_type.state = "active"
-        return resource_type
-
-    def update_resource_type(self, name, add_attributes=None,
-                             del_attributes=None):
-        if not add_attributes and not del_attributes:
-            return
-        add_attributes = add_attributes or []
-        del_attributes = del_attributes or []
-
-        self._set_resource_type_state(name, "updating", "active")
-
-        try:
-            with self.facade.independent_writer() as session:
-                engine = session.connection()
-                rt = self._get_resource_type(session, name)
-
-                with self.facade.writer_connection() as connection:
-                    ctx = migration.MigrationContext.configure(connection)
-                    op = operations.Operations(ctx)
-                    for table in [rt.tablename, '%s_history' % rt.tablename]:
-                        with op.batch_alter_table(table) as batch_op:
-                            for attr in del_attributes:
-                                batch_op.drop_column(attr)
-                            for attr in add_attributes:
-                                server_default = attr.for_filling(
-                                    engine.dialect)
-                                batch_op.add_column(sqlalchemy.Column(
-                                    attr.name, attr.satype,
-                                    nullable=not attr.required,
-                                    server_default=server_default))
-
-                                # We have all rows filled now, we can remove
-                                # the server_default
-                                if server_default is not None:
-                                    batch_op.alter_column(
-                                        column_name=attr.name,
-                                        existing_type=attr.satype,
-                                        existing_server_default=server_default,
-                                        existing_nullable=not attr.required,
-                                        server_default=None)
-
-                rt.state = "active"
-                rt.updated_at = utils.utcnow()
-                rt.attributes.extend(add_attributes)
-                for attr in list(rt.attributes):
-                    if attr.name in del_attributes:
-                        rt.attributes.remove(attr)
-                # FIXME(sileht): yeah that's weird, but attributes is a custom
-                # json column and 'extend' doesn't trigger a sql update; this
-                # enforces the update. I wonder if sqlalchemy provides
-                # something on the column description side.
-                sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')
-
-        except Exception:
-            # NOTE(sileht): We fail the DDL, we have no way to automatically
-            # recover, just set a particular state
-            # TODO(sileht): Create a repair REST endpoint that deletes
-            # columns that exist in the database but not in the resource
-            # type description. This would allow moving a resource type
-            # from updating_error back to the active state, which is
-            # currently not possible.
-            self._set_resource_type_state(name, "updating_error")
-            raise
-
-        return rt
-
-    def get_resource_type(self, name):
-        with self.facade.independent_reader() as session:
-            return self._get_resource_type(session, name)
-
-    def _get_resource_type(self, session, name):
-        resource_type = session.query(ResourceType).get(name)
-        if not resource_type:
-            raise indexer.NoSuchResourceType(name)
-        return resource_type
-
-    @retry_on_deadlock
-    def _set_resource_type_state(self, name, state,
-                                 expected_previous_state=None):
-        with self.facade.writer() as session:
-            q = session.query(ResourceType)
-            q = q.filter(ResourceType.name == name)
-            if expected_previous_state is not None:
-                q = q.filter(ResourceType.state == expected_previous_state)
-            update = q.update({'state': state})
-            if update == 0:
-                if expected_previous_state is not None:
-                    rt = session.query(ResourceType).get(name)
-                    if rt:
-                        raise indexer.UnexpectedResourceTypeState(
-                            name, expected_previous_state, rt.state)
-                raise indexer.IndexerException(
-                    "Failed to set resource type state of %s to %s" %
-                    (name, state))
-
-    @staticmethod
-    def get_resource_type_schema():
-        return base.RESOURCE_TYPE_SCHEMA_MANAGER
-
-    @staticmethod
-    def get_resource_attributes_schemas():
-        return [ext.plugin.schema() for ext in ResourceType.RESOURCE_SCHEMAS]
-
-    def list_resource_types(self):
-        with self.facade.independent_reader() as session:
-            return list(session.query(ResourceType).order_by(
-                ResourceType.name.asc()).all())
-
-    # NOTE(jd) We can have deadlock errors either here or later in
-    # unmap_and_delete_tables(). We can't decorate delete_resource_type()
-    # directly or each part might retry later on its own and cause a
-    # duplicate. And it seems there's no way to use the same session for
-    # both adding the resource_type in our table and calling
-    # unmap_and_delete_tables() :-(
-    @retry_on_deadlock
-    def _mark_as_deleting_resource_type(self, name):
-        try:
-            with self.facade.writer() as session:
-                rt = self._get_resource_type(session, name)
-                if rt.state not in ["active", "deletion_error",
-                                    "creation_error", "updating_error"]:
-                    raise indexer.UnexpectedResourceTypeState(
-                        name,
-                        "active/deletion_error/creation_error/updating_error",
-                        rt.state)
-                session.delete(rt)
-
-                # FIXME(sileht): Why do I need to flush here?
-                # I want remove/add in the same transaction!
- session.flush() - - # NOTE(sileht): delete and recreate to: - # * raise duplicate constraints - # * ensure we do not create a new resource type - # with the same name while we destroy the tables next - rt = ResourceType(name=rt.name, - tablename=rt.tablename, - state="deleting", - attributes=rt.attributes) - session.add(rt) - except exception.DBReferenceError as e: - if (e.constraint in [ - 'fk_resource_resource_type_name', - 'fk_resource_history_resource_type_name', - 'fk_rh_resource_type_name']): - raise indexer.ResourceTypeInUse(name) - raise - return rt - - @retry_on_deadlock - def _delete_resource_type(self, name): - # Really delete the resource type, no resource can be linked to it - # Because we cannot add a resource to a resource_type not in 'active' - # state - with self.facade.writer() as session: - resource_type = self._get_resource_type(session, name) - session.delete(resource_type) - - def delete_resource_type(self, name): - if name == "generic": - raise indexer.ResourceTypeInUse(name) - - rt = self._mark_as_deleting_resource_type(name) - - try: - self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables( - rt, self.facade) - except Exception: - # NOTE(sileht): We fail the DDL, we have no way to automatically - # recover, just set a particular state - self._set_resource_type_state(rt.name, "deletion_error") - raise - - self._delete_resource_type(name) - - def _resource_type_to_mappers(self, session, name): - resource_type = self._get_resource_type(session, name) - if resource_type.state != "active": - raise indexer.UnexpectedResourceTypeState( - name, "active", resource_type.state) - return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type) - - def list_archive_policies(self): - with self.facade.independent_reader() as session: - return list(session.query(ArchivePolicy).all()) - - def get_archive_policy(self, name): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicy).get(name) - - def update_archive_policy(self, name, ap_items): - with self.facade.independent_writer() as session: - ap = session.query(ArchivePolicy).get(name) - if not ap: - raise indexer.NoSuchArchivePolicy(name) - current = sorted(ap.definition, - key=operator.attrgetter('granularity')) - new = sorted(ap_items, key=operator.attrgetter('granularity')) - if len(current) != len(new): - raise indexer.UnsupportedArchivePolicyChange( - name, 'Cannot add or drop granularities') - for c, n in zip(current, new): - if c.granularity != n.granularity: - raise indexer.UnsupportedArchivePolicyChange( - name, '%s granularity interval was changed' - % c.granularity) - # NOTE(gordc): ORM doesn't update JSON column unless new - ap.definition = ap_items - return ap - - def delete_archive_policy(self, name): - constraints = [ - "fk_metric_ap_name_ap_name", - "fk_apr_ap_name_ap_name"] - with self.facade.writer() as session: - try: - if session.query(ArchivePolicy).filter( - ArchivePolicy.name == name).delete() == 0: - raise indexer.NoSuchArchivePolicy(name) - except exception.DBReferenceError as e: - if e.constraint in constraints: - raise indexer.ArchivePolicyInUse(name) - raise - - def create_archive_policy(self, archive_policy): - ap = ArchivePolicy( - name=archive_policy.name, - back_window=archive_policy.back_window, - definition=archive_policy.definition, - aggregation_methods=list(archive_policy.aggregation_methods), - ) - try: - with self.facade.writer() as session: - session.add(ap) - except exception.DBDuplicateEntry: - raise indexer.ArchivePolicyAlreadyExists(archive_policy.name) - 
return ap - - def list_archive_policy_rules(self): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicyRule).order_by( - ArchivePolicyRule.metric_pattern.desc()).all() - - def get_archive_policy_rule(self, name): - with self.facade.independent_reader() as session: - return session.query(ArchivePolicyRule).get(name) - - def delete_archive_policy_rule(self, name): - with self.facade.writer() as session: - if session.query(ArchivePolicyRule).filter( - ArchivePolicyRule.name == name).delete() == 0: - raise indexer.NoSuchArchivePolicyRule(name) - - def create_archive_policy_rule(self, name, metric_pattern, - archive_policy_name): - apr = ArchivePolicyRule( - name=name, - archive_policy_name=archive_policy_name, - metric_pattern=metric_pattern - ) - try: - with self.facade.writer() as session: - session.add(apr) - except exception.DBDuplicateEntry: - raise indexer.ArchivePolicyRuleAlreadyExists(name) - return apr - - @retry_on_deadlock - def create_metric(self, id, creator, archive_policy_name, - name=None, unit=None, resource_id=None): - m = Metric(id=id, - creator=creator, - archive_policy_name=archive_policy_name, - name=name, - unit=unit, - resource_id=resource_id) - try: - with self.facade.writer() as session: - session.add(m) - except exception.DBDuplicateEntry: - raise indexer.NamedMetricAlreadyExists(name) - except exception.DBReferenceError as e: - if (e.constraint == - 'fk_metric_ap_name_ap_name'): - raise indexer.NoSuchArchivePolicy(archive_policy_name) - if e.constraint == 'fk_metric_resource_id_resource_id': - raise indexer.NoSuchResource(resource_id) - raise - return m - - @retry_on_deadlock - def list_metrics(self, names=None, ids=None, details=False, - status='active', limit=None, marker=None, sorts=None, - creator=None, **kwargs): - sorts = sorts or [] - if ids is not None and not ids: - return [] - if names is not None and not names: - return [] - with self.facade.independent_reader() as session: - q = session.query(Metric).filter( - Metric.status == status) - if names is not None: - q = q.filter(Metric.name.in_(names)) - if ids is not None: - q = q.filter(Metric.id.in_(ids)) - if creator is not None: - if creator[0] == ":": - q = q.filter(Metric.creator.like("%%%s" % creator)) - elif creator[-1] == ":": - q = q.filter(Metric.creator.like("%s%%" % creator)) - else: - q = q.filter(Metric.creator == creator) - for attr in kwargs: - q = q.filter(getattr(Metric, attr) == kwargs[attr]) - if details: - q = q.options(sqlalchemy.orm.joinedload('resource')) - - sort_keys, sort_dirs = self._build_sort_keys(sorts) - - if marker: - metric_marker = self.list_metrics(ids=[marker]) - if metric_marker: - metric_marker = metric_marker[0] - else: - raise indexer.InvalidPagination( - "Invalid marker: `%s'" % marker) - else: - metric_marker = None - - try: - q = oslo_db_utils.paginate_query(q, Metric, limit=limit, - sort_keys=sort_keys, - marker=metric_marker, - sort_dirs=sort_dirs) - except ValueError as e: - raise indexer.InvalidPagination(e) - except exception.InvalidSortKey as e: - raise indexer.InvalidPagination(e) - - return list(q.all()) - - @retry_on_deadlock - def create_resource(self, resource_type, id, - creator, user_id=None, project_id=None, - started_at=None, ended_at=None, metrics=None, - original_resource_id=None, - **kwargs): - if (started_at is not None - and ended_at is not None - and started_at > ended_at): - raise ValueError( - "Start timestamp cannot be after end timestamp") - if original_resource_id is None: - original_resource_id = 
str(id)
-        with self.facade.writer() as session:
-            resource_cls = self._resource_type_to_mappers(
-                session, resource_type)['resource']
-            r = resource_cls(
-                id=id,
-                original_resource_id=original_resource_id,
-                type=resource_type,
-                creator=creator,
-                user_id=user_id,
-                project_id=project_id,
-                started_at=started_at,
-                ended_at=ended_at,
-                **kwargs)
-            session.add(r)
-            try:
-                session.flush()
-            except exception.DBDuplicateEntry:
-                raise indexer.ResourceAlreadyExists(id)
-            except exception.DBReferenceError as ex:
-                raise indexer.ResourceValueError(r.type,
-                                                 ex.key,
-                                                 getattr(r, ex.key))
-            if metrics is not None:
-                self._set_metrics_for_resource(session, r, metrics)
-
-            # NOTE(jd) Force load of metrics :)
-            r.metrics
-
-        return r
-
-    @retry_on_deadlock
-    def update_resource(self, resource_type,
-                        resource_id, ended_at=_marker, metrics=_marker,
-                        append_metrics=False,
-                        create_revision=True,
-                        **kwargs):
-        with self.facade.writer() as session:
-            mappers = self._resource_type_to_mappers(session, resource_type)
-            resource_cls = mappers["resource"]
-            resource_history_cls = mappers["history"]
-
-            try:
-                # NOTE(sileht): We use FOR UPDATE, which is not Galera
-                # friendly, but there is no other way to cleanly patch a
-                # resource and store the history safely when two
-                # concurrent calls are made.
-                q = session.query(resource_cls).filter(
-                    resource_cls.id == resource_id).with_for_update()
-
-                r = q.first()
-                if r is None:
-                    raise indexer.NoSuchResource(resource_id)
-
-                if create_revision:
-                    # Build history
-                    rh = resource_history_cls()
-                    for col in sqlalchemy.inspect(resource_cls).columns:
-                        setattr(rh, col.name, getattr(r, col.name))
-                    now = utils.utcnow()
-                    rh.revision_end = now
-                    session.add(rh)
-                    r.revision_start = now
-
-                # Update the resource
-                if ended_at is not _marker:
-                    # NOTE(jd) MySQL does not honor checks. I hate it.
-                    engine = session.connection()
-                    if engine.dialect.name == "mysql":
-                        if r.started_at is not None and ended_at is not None:
-                            if r.started_at > ended_at:
-                                raise indexer.ResourceValueError(
-                                    resource_type, "ended_at", ended_at)
-                    r.ended_at = ended_at
-
-                if kwargs:
-                    for attribute, value in six.iteritems(kwargs):
-                        if hasattr(r, attribute):
-                            setattr(r, attribute, value)
-                        else:
-                            raise indexer.ResourceAttributeError(
-                                r.type, attribute)
-
-                if metrics is not _marker:
-                    if not append_metrics:
-                        session.query(Metric).filter(
-                            Metric.resource_id == resource_id,
-                            Metric.status == 'active').update(
-                                {"resource_id": None})
-                    self._set_metrics_for_resource(session, r, metrics)
-
-                session.flush()
-            except exception.DBConstraintError as e:
-                if e.check_name == "ck_started_before_ended":
-                    raise indexer.ResourceValueError(
-                        resource_type, "ended_at", ended_at)
-                raise
-
-            # NOTE(jd) Force load of metrics :)
- r.metrics - - return r - - @staticmethod - def _set_metrics_for_resource(session, r, metrics): - for name, value in six.iteritems(metrics): - if isinstance(value, uuid.UUID): - try: - update = session.query(Metric).filter( - Metric.id == value, - Metric.status == 'active', - Metric.creator == r.creator, - ).update({"resource_id": r.id, "name": name}) - except exception.DBDuplicateEntry: - raise indexer.NamedMetricAlreadyExists(name) - if update == 0: - raise indexer.NoSuchMetric(value) - else: - unit = value.get('unit') - ap_name = value['archive_policy_name'] - m = Metric(id=uuid.uuid4(), - creator=r.creator, - archive_policy_name=ap_name, - name=name, - unit=unit, - resource_id=r.id) - session.add(m) - try: - session.flush() - except exception.DBDuplicateEntry: - raise indexer.NamedMetricAlreadyExists(name) - except exception.DBReferenceError as e: - if (e.constraint == - 'fk_metric_ap_name_ap_name'): - raise indexer.NoSuchArchivePolicy(ap_name) - raise - - session.expire(r, ['metrics']) - - @retry_on_deadlock - def delete_resource(self, resource_id): - with self.facade.writer() as session: - # We are going to delete the resource; the on delete will set the - # resource_id of the attached metrics to NULL, we just have to mark - # their status as 'delete' - session.query(Metric).filter( - Metric.resource_id == resource_id).update( - {"status": "delete"}) - if session.query(Resource).filter( - Resource.id == resource_id).delete() == 0: - raise indexer.NoSuchResource(resource_id) - - @retry_on_deadlock - def delete_resources(self, resource_type='generic', - attribute_filter=None): - if not attribute_filter: - raise ValueError("attribute_filter must be set") - - with self.facade.writer() as session: - target_cls = self._resource_type_to_mappers( - session, resource_type)["resource"] - - q = session.query(target_cls.id) - - engine = session.connection() - try: - f = QueryTransformer.build_filter(engine.dialect.name, - target_cls, - attribute_filter) - except indexer.QueryAttributeError as e: - # NOTE(jd) The QueryAttributeError does not know about - # resource_type, so convert it - raise indexer.ResourceAttributeError(resource_type, - e.attribute) - - q = q.filter(f) - - session.query(Metric).filter( - Metric.resource_id.in_(q) - ).update({"status": "delete"}, - synchronize_session=False) - return q.delete(synchronize_session=False) - - @retry_on_deadlock - def get_resource(self, resource_type, resource_id, with_metrics=False): - with self.facade.independent_reader() as session: - resource_cls = self._resource_type_to_mappers( - session, resource_type)['resource'] - q = session.query( - resource_cls).filter( - resource_cls.id == resource_id) - if with_metrics: - q = q.options(sqlalchemy.orm.joinedload('metrics')) - return q.first() - - def _get_history_result_mapper(self, session, resource_type): - mappers = self._resource_type_to_mappers(session, resource_type) - resource_cls = mappers['resource'] - history_cls = mappers['history'] - - resource_cols = {} - history_cols = {} - for col in sqlalchemy.inspect(history_cls).columns: - history_cols[col.name] = col - if col.name in ["revision", "revision_end"]: - value = None if col.name == "revision_end" else -1 - resource_cols[col.name] = sqlalchemy.bindparam( - col.name, value, col.type).label(col.name) - else: - resource_cols[col.name] = getattr(resource_cls, col.name) - s1 = sqlalchemy.select(history_cols.values()) - s2 = sqlalchemy.select(resource_cols.values()) - if resource_type != "generic": - s1 = s1.where(history_cls.revision == 
ResourceHistory.revision) - s2 = s2.where(resource_cls.id == Resource.id) - union_stmt = sqlalchemy.union(s1, s2) - stmt = union_stmt.alias("result") - - class Result(base.ResourceJsonifier, base.GnocchiBase): - def __iter__(self): - return iter((key, getattr(self, key)) for key in stmt.c.keys()) - - sqlalchemy.orm.mapper( - Result, stmt, primary_key=[stmt.c.id, stmt.c.revision], - properties={ - 'metrics': sqlalchemy.orm.relationship( - Metric, - primaryjoin=sqlalchemy.and_( - Metric.resource_id == stmt.c.id, - Metric.status == 'active'), - foreign_keys=Metric.resource_id) - }) - - return Result - - @retry_on_deadlock - def list_resources(self, resource_type='generic', - attribute_filter=None, - details=False, - history=False, - limit=None, - marker=None, - sorts=None): - sorts = sorts or [] - - with self.facade.independent_reader() as session: - if history: - target_cls = self._get_history_result_mapper( - session, resource_type) - else: - target_cls = self._resource_type_to_mappers( - session, resource_type)["resource"] - - q = session.query(target_cls) - - if attribute_filter: - engine = session.connection() - try: - f = QueryTransformer.build_filter(engine.dialect.name, - target_cls, - attribute_filter) - except indexer.QueryAttributeError as e: - # NOTE(jd) The QueryAttributeError does not know about - # resource_type, so convert it - raise indexer.ResourceAttributeError(resource_type, - e.attribute) - - q = q.filter(f) - - sort_keys, sort_dirs = self._build_sort_keys(sorts) - - if marker: - resource_marker = self.get_resource(resource_type, marker) - if resource_marker is None: - raise indexer.InvalidPagination( - "Invalid marker: `%s'" % marker) - else: - resource_marker = None - - try: - q = oslo_db_utils.paginate_query(q, target_cls, limit=limit, - sort_keys=sort_keys, - marker=resource_marker, - sort_dirs=sort_dirs) - except ValueError as e: - raise indexer.InvalidPagination(e) - except exception.InvalidSortKey as e: - raise indexer.InvalidPagination(e) - - # Always include metrics - q = q.options(sqlalchemy.orm.joinedload("metrics")) - all_resources = q.all() - - if details: - grouped_by_type = itertools.groupby( - all_resources, lambda r: (r.revision != -1, r.type)) - all_resources = [] - for (is_history, type), resources in grouped_by_type: - if type == 'generic': - # No need for a second query - all_resources.extend(resources) - else: - try: - target_cls = self._resource_type_to_mappers( - session, type)['history' if is_history else - 'resource'] - except (indexer.UnexpectedResourceTypeState, - indexer.NoSuchResourceType): - # NOTE(sileht): This resource_type have been - # removed in the meantime. - continue - if is_history: - f = target_cls.revision.in_([r.revision - for r in resources]) - else: - f = target_cls.id.in_([r.id for r in resources]) - - q = session.query(target_cls).filter(f) - # Always include metrics - q = q.options(sqlalchemy.orm.joinedload('metrics')) - try: - all_resources.extend(q.all()) - except sqlalchemy.exc.ProgrammingError as e: - # NOTE(jd) This exception can happen when the - # resources and their resource type have been - # deleted in the meantime: - # sqlalchemy.exc.ProgrammingError: - # (pymysql.err.ProgrammingError) - # (1146, "Table \'test.rt_f00\' doesn\'t exist") - # In that case, just ignore those resources. 
-                            if (not pymysql
-                                    or not isinstance(
-                                        e, sqlalchemy.exc.ProgrammingError)
-                                    or not isinstance(
-                                        e.orig, pymysql.err.ProgrammingError)
-                                    or (e.orig.args[0]
-                                        != pymysql.constants.ER.NO_SUCH_TABLE)):
-                                raise
-
-        return all_resources
-
-    def expunge_metric(self, id):
-        with self.facade.writer() as session:
-            if session.query(Metric).filter(Metric.id == id).delete() == 0:
-                raise indexer.NoSuchMetric(id)
-
-    def delete_metric(self, id):
-        with self.facade.writer() as session:
-            if session.query(Metric).filter(
-                    Metric.id == id, Metric.status == 'active').update(
-                        {"status": "delete"}) == 0:
-                raise indexer.NoSuchMetric(id)
-
-    @staticmethod
-    def _build_sort_keys(sorts):
-        # transform the api-wg representation to the oslo.db one
-        sort_keys = []
-        sort_dirs = []
-        for sort in sorts:
-            sort_key, __, sort_dir = sort.partition(":")
-            sort_keys.append(sort_key.strip())
-            sort_dirs.append(sort_dir or 'asc')
-
-        # paginate_query requires at least one unique column
-        if 'id' not in sort_keys:
-            sort_keys.append('id')
-            sort_dirs.append('asc')
-
-        return sort_keys, sort_dirs
-
-
-class QueryTransformer(object):
-    unary_operators = {
-        u"not": sqlalchemy.not_,
-    }
-
-    binary_operators = {
-        u"=": operator.eq,
-        u"==": operator.eq,
-        u"eq": operator.eq,
-
-        u"<": operator.lt,
-        u"lt": operator.lt,
-
-        u">": operator.gt,
-        u"gt": operator.gt,
-
-        u"<=": operator.le,
-        u"≤": operator.le,
-        u"le": operator.le,
-
-        u">=": operator.ge,
-        u"≥": operator.ge,
-        u"ge": operator.ge,
-
-        u"!=": operator.ne,
-        u"≠": operator.ne,
-        u"ne": operator.ne,
-
-        u"in": lambda field_name, values: field_name.in_(values),
-
-        u"like": lambda field, value: field.like(value),
-    }
-
-    multiple_operators = {
-        u"or": sqlalchemy.or_,
-        u"∨": sqlalchemy.or_,
-
-        u"and": sqlalchemy.and_,
-        u"∧": sqlalchemy.and_,
-    }
-
-    converters = (
-        (base.TimestampUTC, utils.to_datetime),
-        (types.String, six.text_type),
-        (types.Integer, int),
-        (types.Numeric, float),
-    )
-
-    @classmethod
-    def _handle_multiple_op(cls, engine, table, op, nodes):
-        return op(*[
-            cls.build_filter(engine, table, node)
-            for node in nodes
-        ])
-
-    @classmethod
-    def _handle_unary_op(cls, engine, table, op, node):
-        return op(cls.build_filter(engine, table, node))
-
-    @classmethod
-    def _handle_binary_op(cls, engine, table, op, nodes):
-        try:
-            field_name, value = list(nodes.items())[0]
-        except Exception:
-            raise indexer.QueryError()
-
-        if field_name == "lifespan":
-            attr = getattr(table, "ended_at") - getattr(table, "started_at")
-            value = utils.to_timespan(value)
-            if engine == "mysql":
-                # NOTE(jd) Subtracting 2 timestamps in MySQL results in some
-                # weird values based on string comparison. It's useless and
-                # it does not work at all with seconds or anything. Just
-                # skip it.
- raise exceptions.NotImplementedError - elif field_name == "created_by_user_id": - creator = getattr(table, "creator") - if op == operator.eq: - return creator.like("%s:%%" % value) - elif op == operator.ne: - return sqlalchemy.not_(creator.like("%s:%%" % value)) - elif op == cls.binary_operators[u"like"]: - return creator.like("%s:%%" % value) - raise indexer.QueryValueError(value, field_name) - elif field_name == "created_by_project_id": - creator = getattr(table, "creator") - if op == operator.eq: - return creator.like("%%:%s" % value) - elif op == operator.ne: - return sqlalchemy.not_(creator.like("%%:%s" % value)) - elif op == cls.binary_operators[u"like"]: - return creator.like("%%:%s" % value) - raise indexer.QueryValueError(value, field_name) - else: - try: - attr = getattr(table, field_name) - except AttributeError: - raise indexer.QueryAttributeError(table, field_name) - - if not hasattr(attr, "type"): - # This is not a column - raise indexer.QueryAttributeError(table, field_name) - - # Convert value to the right type - if value is not None: - for klass, converter in cls.converters: - if isinstance(attr.type, klass): - try: - if isinstance(value, list): - # we got a list for in_ operator - value = [converter(v) for v in value] - else: - value = converter(value) - except Exception: - raise indexer.QueryValueError(value, field_name) - break - - return op(attr, value) - - @classmethod - def build_filter(cls, engine, table, tree): - try: - operator, nodes = list(tree.items())[0] - except Exception: - raise indexer.QueryError() - - try: - op = cls.multiple_operators[operator] - except KeyError: - try: - op = cls.binary_operators[operator] - except KeyError: - try: - op = cls.unary_operators[operator] - except KeyError: - raise indexer.QueryInvalidOperator(operator) - return cls._handle_unary_op(engine, op, nodes) - return cls._handle_binary_op(engine, table, op, nodes) - return cls._handle_multiple_op(engine, table, op, nodes) diff --git a/gnocchi/indexer/sqlalchemy_base.py b/gnocchi/indexer/sqlalchemy_base.py deleted file mode 100644 index 1ebc60a9f..000000000 --- a/gnocchi/indexer/sqlalchemy_base.py +++ /dev/null @@ -1,443 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from __future__ import absolute_import -import calendar -import datetime -import decimal - -import iso8601 -from oslo_db.sqlalchemy import models -import six -import sqlalchemy -from sqlalchemy.dialects import mysql -from sqlalchemy.ext import declarative -from sqlalchemy import types -import sqlalchemy_utils - -from gnocchi import archive_policy -from gnocchi import indexer -from gnocchi import resource_type -from gnocchi import storage -from gnocchi import utils - -Base = declarative.declarative_base() - -COMMON_TABLES_ARGS = {'mysql_charset': "utf8", - 'mysql_engine': "InnoDB"} - - -class PreciseTimestamp(types.TypeDecorator): - """Represents a timestamp precise to the microsecond. 
- - Deprecated in favor of TimestampUTC. - Still used in alembic migrations. - """ - - impl = sqlalchemy.DateTime - - @staticmethod - def _decimal_to_dt(dec): - """Return a datetime from Decimal unixtime format.""" - if dec is None: - return None - - integer = int(dec) - micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000) - daittyme = datetime.datetime.utcfromtimestamp(integer) - return daittyme.replace(microsecond=int(round(micro))) - - @staticmethod - def _dt_to_decimal(utc): - """Datetime to Decimal. - - Some databases don't store microseconds in datetime - so we always store as Decimal unixtime. - """ - if utc is None: - return None - - decimal.getcontext().prec = 30 - return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + - (decimal.Decimal(str(utc.microsecond)) / - decimal.Decimal("1000000.0"))) - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor( - types.DECIMAL(precision=20, - scale=6, - asdecimal=True)) - return dialect.type_descriptor(self.impl) - - def compare_against_backend(self, dialect, conn_type): - if dialect.name == 'mysql': - return issubclass(type(conn_type), types.DECIMAL) - return issubclass(type(conn_type), type(self.impl)) - - def process_bind_param(self, value, dialect): - if value is not None: - value = utils.normalize_time(value) - if dialect.name == 'mysql': - return self._dt_to_decimal(value) - return value - - def process_result_value(self, value, dialect): - if dialect.name == 'mysql': - value = self._decimal_to_dt(value) - if value is not None: - return utils.normalize_time(value).replace( - tzinfo=iso8601.iso8601.UTC) - - -class TimestampUTC(types.TypeDecorator): - """Represents a timestamp precise to the microsecond.""" - - impl = sqlalchemy.DateTime - - def load_dialect_impl(self, dialect): - if dialect.name == 'mysql': - return dialect.type_descriptor(mysql.DATETIME(fsp=6)) - return self.impl - - def process_bind_param(self, value, dialect): - if value is not None: - return utils.normalize_time(value) - - def process_result_value(self, value, dialect): - if value is not None: - return value.replace(tzinfo=iso8601.iso8601.UTC) - - -class GnocchiBase(models.ModelBase): - __table_args__ = ( - COMMON_TABLES_ARGS, - ) - - -class ArchivePolicyDefinitionType(sqlalchemy_utils.JSONType): - def process_result_value(self, value, dialect): - values = super(ArchivePolicyDefinitionType, - self).process_result_value(value, dialect) - return [archive_policy.ArchivePolicyItem(**v) for v in values] - - -class SetType(sqlalchemy_utils.JSONType): - def process_result_value(self, value, dialect): - return set(super(SetType, - self).process_result_value(value, dialect)) - - -class ArchivePolicy(Base, GnocchiBase, archive_policy.ArchivePolicy): - __tablename__ = 'archive_policy' - - name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True) - back_window = sqlalchemy.Column(sqlalchemy.Integer, nullable=False) - definition = sqlalchemy.Column(ArchivePolicyDefinitionType, nullable=False) - # TODO(jd) Use an array of string instead, PostgreSQL can do that - aggregation_methods = sqlalchemy.Column(SetType, - nullable=False) - - -class Metric(Base, GnocchiBase, storage.Metric): - __tablename__ = 'metric' - __table_args__ = ( - sqlalchemy.Index('ix_metric_status', 'status'), - sqlalchemy.UniqueConstraint("resource_id", "name", - name="uniq_metric0resource_id0name"), - COMMON_TABLES_ARGS, - ) - - id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(), - primary_key=True) - archive_policy_name = 
sqlalchemy.Column( - sqlalchemy.String(255), - sqlalchemy.ForeignKey( - 'archive_policy.name', - ondelete="RESTRICT", - name="fk_metric_ap_name_ap_name"), - nullable=False) - archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined") - creator = sqlalchemy.Column(sqlalchemy.String(255)) - resource_id = sqlalchemy.Column( - sqlalchemy_utils.UUIDType(), - sqlalchemy.ForeignKey('resource.id', - ondelete="SET NULL", - name="fk_metric_resource_id_resource_id")) - name = sqlalchemy.Column(sqlalchemy.String(255)) - unit = sqlalchemy.Column(sqlalchemy.String(31)) - status = sqlalchemy.Column(sqlalchemy.Enum('active', 'delete', - name="metric_status_enum"), - nullable=False, - server_default='active') - - def jsonify(self): - d = { - "id": self.id, - "creator": self.creator, - "name": self.name, - "unit": self.unit, - } - unloaded = sqlalchemy.inspect(self).unloaded - if 'resource' in unloaded: - d['resource_id'] = self.resource_id - else: - d['resource'] = self.resource - if 'archive_policy' in unloaded: - d['archive_policy_name'] = self.archive_policy_name - else: - d['archive_policy'] = self.archive_policy - - if self.creator is None: - d['created_by_user_id'] = d['created_by_project_id'] = None - else: - d['created_by_user_id'], _, d['created_by_project_id'] = ( - self.creator.partition(":") - ) - - return d - - def __eq__(self, other): - # NOTE(jd) If `other` is a SQL Metric, we only compare - # archive_policy_name, and we don't compare archive_policy that might - # not be loaded. Otherwise we fallback to the original comparison for - # storage.Metric. - return ((isinstance(other, Metric) - and self.id == other.id - and self.archive_policy_name == other.archive_policy_name - and self.creator == other.creator - and self.name == other.name - and self.unit == other.unit - and self.resource_id == other.resource_id) - or (storage.Metric.__eq__(self, other))) - - __hash__ = storage.Metric.__hash__ - - -RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager( - "gnocchi.indexer.sqlalchemy.resource_type_attribute") - - -class ResourceTypeAttributes(sqlalchemy_utils.JSONType): - def process_bind_param(self, attributes, dialect): - return super(ResourceTypeAttributes, self).process_bind_param( - attributes.jsonify(), dialect) - - def process_result_value(self, value, dialect): - attributes = super(ResourceTypeAttributes, self).process_result_value( - value, dialect) - return RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(attributes) - - -class ResourceType(Base, GnocchiBase, resource_type.ResourceType): - __tablename__ = 'resource_type' - __table_args__ = ( - sqlalchemy.UniqueConstraint("tablename", - name="uniq_resource_type0tablename"), - COMMON_TABLES_ARGS, - ) - - name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True, - nullable=False) - tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False) - attributes = sqlalchemy.Column(ResourceTypeAttributes) - state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating", - "creation_error", "deleting", - "deletion_error", "updating", - "updating_error", - name="resource_type_state_enum"), - nullable=False, - server_default="creating") - updated_at = sqlalchemy.Column(TimestampUTC, nullable=False, - # NOTE(jd): We would like to use - # sqlalchemy.func.now, but we can't - # because the type of PreciseTimestamp in - # MySQL is not a Timestamp, so it would - # not store a timestamp but a date as an - # integer. 
- default=lambda: utils.utcnow()) - - def to_baseclass(self): - cols = {} - for attr in self.attributes: - cols[attr.name] = sqlalchemy.Column(attr.satype, - nullable=not attr.required) - return type(str("%s_base" % self.tablename), (object, ), cols) - - -class ResourceJsonifier(indexer.Resource): - def jsonify(self): - d = dict(self) - del d['revision'] - if 'metrics' not in sqlalchemy.inspect(self).unloaded: - d['metrics'] = dict((m.name, six.text_type(m.id)) - for m in self.metrics) - - if self.creator is None: - d['created_by_user_id'] = d['created_by_project_id'] = None - else: - d['created_by_user_id'], _, d['created_by_project_id'] = ( - self.creator.partition(":") - ) - - return d - - -class ResourceMixin(ResourceJsonifier): - @declarative.declared_attr - def __table_args__(cls): - return (sqlalchemy.CheckConstraint('started_at <= ended_at', - name="ck_started_before_ended"), - COMMON_TABLES_ARGS) - - @declarative.declared_attr - def type(cls): - return sqlalchemy.Column( - sqlalchemy.String(255), - sqlalchemy.ForeignKey('resource_type.name', - ondelete="RESTRICT", - name="fk_%s_resource_type_name" % - cls.__tablename__), - nullable=False) - - creator = sqlalchemy.Column(sqlalchemy.String(255)) - started_at = sqlalchemy.Column(TimestampUTC, nullable=False, - default=lambda: utils.utcnow()) - revision_start = sqlalchemy.Column(TimestampUTC, nullable=False, - default=lambda: utils.utcnow()) - ended_at = sqlalchemy.Column(TimestampUTC) - user_id = sqlalchemy.Column(sqlalchemy.String(255)) - project_id = sqlalchemy.Column(sqlalchemy.String(255)) - original_resource_id = sqlalchemy.Column(sqlalchemy.String(255), - nullable=False) - - -class Resource(ResourceMixin, Base, GnocchiBase): - __tablename__ = 'resource' - _extra_keys = ['revision', 'revision_end'] - revision = -1 - id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(), - primary_key=True) - revision_end = None - metrics = sqlalchemy.orm.relationship( - Metric, backref="resource", - primaryjoin="and_(Resource.id == Metric.resource_id, " - "Metric.status == 'active')") - - def get_metric(self, metric_name): - m = super(Resource, self).get_metric(metric_name) - if m: - if sqlalchemy.orm.session.object_session(self): - # NOTE(jd) The resource is already loaded so that should not - # trigger a SELECT - m.resource - return m - - -class ResourceHistory(ResourceMixin, Base, GnocchiBase): - __tablename__ = 'resource_history' - - revision = sqlalchemy.Column(sqlalchemy.Integer, autoincrement=True, - primary_key=True) - id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(), - sqlalchemy.ForeignKey( - 'resource.id', - ondelete="CASCADE", - name="fk_rh_id_resource_id"), - nullable=False) - revision_end = sqlalchemy.Column(TimestampUTC, nullable=False, - default=lambda: utils.utcnow()) - metrics = sqlalchemy.orm.relationship( - Metric, primaryjoin="Metric.resource_id == ResourceHistory.id", - foreign_keys='Metric.resource_id') - - -class ResourceExt(object): - """Default extension class for plugin - - Used for plugin that doesn't need additional columns - """ - - -class ResourceExtMixin(object): - @declarative.declared_attr - def __table_args__(cls): - return (COMMON_TABLES_ARGS, ) - - @declarative.declared_attr - def id(cls): - tablename_compact = cls.__tablename__ - if tablename_compact.endswith("_history"): - tablename_compact = tablename_compact[:-6] - return sqlalchemy.Column( - sqlalchemy_utils.UUIDType(), - sqlalchemy.ForeignKey( - 'resource.id', - ondelete="CASCADE", - name="fk_%s_id_resource_id" % tablename_compact, - # NOTE(sileht): We 
use to ensure that postgresql - # does not use AccessExclusiveLock on destination table - use_alter=True), - primary_key=True - ) - - -class ResourceHistoryExtMixin(object): - @declarative.declared_attr - def __table_args__(cls): - return (COMMON_TABLES_ARGS, ) - - @declarative.declared_attr - def revision(cls): - tablename_compact = cls.__tablename__ - if tablename_compact.endswith("_history"): - tablename_compact = tablename_compact[:-6] - return sqlalchemy.Column( - sqlalchemy.Integer, - sqlalchemy.ForeignKey( - 'resource_history.revision', - ondelete="CASCADE", - name="fk_%s_revision_rh_revision" - % tablename_compact, - # NOTE(sileht): We use to ensure that postgresql - # does not use AccessExclusiveLock on destination table - use_alter=True), - primary_key=True - ) - - -class HistoryModelIterator(models.ModelIterator): - def __next__(self): - # NOTE(sileht): Our custom resource attribute columns don't - # have the same name in database than in sqlalchemy model - # so remove the additional "f_" for the model name - n = six.advance_iterator(self.i) - model_attr = n[2:] if n[:2] == "f_" else n - return model_attr, getattr(self.model, n) - - -class ArchivePolicyRule(Base, GnocchiBase): - __tablename__ = 'archive_policy_rule' - - name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True) - archive_policy_name = sqlalchemy.Column( - sqlalchemy.String(255), - sqlalchemy.ForeignKey( - 'archive_policy.name', - ondelete="RESTRICT", - name="fk_apr_ap_name_ap_name"), - nullable=False) - metric_pattern = sqlalchemy.Column(sqlalchemy.String(255), nullable=False) diff --git a/gnocchi/indexer/sqlalchemy_extension.py b/gnocchi/indexer/sqlalchemy_extension.py deleted file mode 100644 index bc4d84181..000000000 --- a/gnocchi/indexer/sqlalchemy_extension.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- encoding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
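The extension module deleted next maps each resource-type attribute schema to a SQLAlchemy column type (satype) plus an optional fill literal (for_filling). A hedged sketch of how update_resource_type() in the deleted sqlalchemy.py above consumes the two; batch_op stands for an alembic batch-alter handle and attr for one schema instance, both assumed here for illustration:

    import sqlalchemy


    def add_attribute_column(batch_op, attr, dialect):
        # for_filling() yields a literal used as a temporary
        # server_default so that existing rows receive the fill value.
        server_default = attr.for_filling(dialect)
        batch_op.add_column(sqlalchemy.Column(
            attr.name, attr.satype,
            nullable=not attr.required,
            server_default=server_default))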
- -from __future__ import absolute_import - -import sqlalchemy -import sqlalchemy_utils - -from gnocchi import resource_type - - -class SchemaMixin(object): - def for_filling(self, dialect): - # NOTE(sileht): This must be used only for patching resource type - # to fill all row with a default value and then switch back the - # server_default to None - if self.fill is None: - return None - - # NOTE(sileht): server_default must be converted in sql element - return sqlalchemy.literal(self.fill) - - -class StringSchema(resource_type.StringSchema, SchemaMixin): - @property - def satype(self): - return sqlalchemy.String(self.max_length) - - -class UUIDSchema(resource_type.UUIDSchema, SchemaMixin): - satype = sqlalchemy_utils.UUIDType() - - def for_filling(self, dialect): - if self.fill is None: - return False # Don't set any server_default - return sqlalchemy.literal( - self.satype.process_bind_param(self.fill, dialect)) - - -class NumberSchema(resource_type.NumberSchema, SchemaMixin): - satype = sqlalchemy.Float(53) - - -class BoolSchema(resource_type.BoolSchema, SchemaMixin): - satype = sqlalchemy.Boolean diff --git a/gnocchi/indexer/sqlalchemy_legacy_resources.py b/gnocchi/indexer/sqlalchemy_legacy_resources.py deleted file mode 100644 index 8390476bb..000000000 --- a/gnocchi/indexer/sqlalchemy_legacy_resources.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- encoding: utf-8 -*- - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
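The mapping deleted below is what the ffc7bbeec0b0 migration earlier in this patch iterates over: each legacy Ceilometer resource type becomes one resource_type row with JSON-encoded attributes. A rough sketch of that consumption, under the same assumptions the migration makes:

    import json

    from gnocchi.indexer import sqlalchemy_legacy_resources as legacy

    for name, attributes in legacy.ceilometer_resources.items():
        # Overlong names get a shortened table alias.
        tablename = legacy.ceilometer_tablenames.get(name, name)
        row = {"name": name, "tablename": tablename,
               "attributes": json.dumps(attributes)}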
- -# NOTE(sileht): this code is also in alembic migration -ceilometer_tablenames = { - "instance_network_interface": "instance_net_int", - "host_network_interface": "host_net_int", -} -ceilometer_resources = { - "generic": {}, - "image": { - "name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "container_format": {"type": "string", "min_length": 0, - "max_length": 255, "required": True}, - "disk_format": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - }, - "instance": { - "flavor_id": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "image_ref": {"type": "string", "min_length": 0, "max_length": 255, - "required": False}, - "host": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "display_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "server_group": {"type": "string", "min_length": 0, "max_length": 255, - "required": False}, - }, - "instance_disk": { - "name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "instance_id": {"type": "uuid", "required": True}, - }, - "instance_network_interface": { - "name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "instance_id": {"type": "uuid", "required": True}, - }, - "volume": { - "display_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": False}, - }, - "swift_account": {}, - "ceph_account": {}, - "network": {}, - "identity": {}, - "ipmi": {}, - "stack": {}, - "host": { - "host_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - }, - "host_network_interface": { - "host_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "device_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": False}, - }, - "host_disk": { - "host_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": True}, - "device_name": {"type": "string", "min_length": 0, "max_length": 255, - "required": False}, - }, -} diff --git a/gnocchi/json.py b/gnocchi/json.py deleted file mode 100644 index eb5fa9248..000000000 --- a/gnocchi/json.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2015-2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
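The serializer deleted below normalizes Python objects (UUIDs, datetimes, numpy timestamps, anything with a jsonify() method) before handing them to ujson. A short usage sketch; the output shown is approximate:

    import datetime
    import uuid

    from gnocchi import json

    json.dumps({"id": uuid.uuid4(),
                "at": datetime.datetime(2017, 6, 5, 17, 4, 30)})
    # UUIDs serialize as strings and datetimes as ISO 8601, roughly:
    # '{"id":"...","at":"2017-06-05T17:04:30"}'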
-import datetime -import uuid - -import numpy -import six -import ujson - - -def to_primitive(obj): - if isinstance(obj, ((six.text_type,) - + six.integer_types - + (type(None), bool, float))): - return obj - if isinstance(obj, uuid.UUID): - return six.text_type(obj) - if isinstance(obj, datetime.datetime): - return obj.isoformat() - if isinstance(obj, numpy.datetime64): - # Do not include nanoseconds if null - return str(obj).rpartition(".000000000")[0] + "+00:00" - # This mimics what Pecan implements in its default JSON encoder - if hasattr(obj, "jsonify"): - return to_primitive(obj.jsonify()) - if isinstance(obj, dict): - return {to_primitive(k): to_primitive(v) - for k, v in obj.items()} - if hasattr(obj, 'iteritems'): - return to_primitive(dict(obj.iteritems())) - # Python 3 does not have iteritems - if hasattr(obj, 'items'): - return to_primitive(dict(obj.items())) - if hasattr(obj, '__iter__'): - return list(map(to_primitive, obj)) - return obj - - -def dumps(obj): - return ujson.dumps(to_primitive(obj)) - - -# For convenience -loads = ujson.loads -load = ujson.load diff --git a/gnocchi/opts.py b/gnocchi/opts.py deleted file mode 100644 index 023138da5..000000000 --- a/gnocchi/opts.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import copy -import itertools -import operator -import pkg_resources -import uuid - -from oslo_config import cfg -from oslo_middleware import cors - -import gnocchi.archive_policy -import gnocchi.indexer -import gnocchi.storage -import gnocchi.storage.ceph -import gnocchi.storage.file -import gnocchi.storage.redis -import gnocchi.storage.s3 -import gnocchi.storage.swift - - -# NOTE(sileht): The oslo.config interpolation is buggy when the value -# is None, this replaces it by the expected empty string. -# Fix will perhaps be fixed by https://review.openstack.org/#/c/417496/ -# But it seems some projects are relaying on the bug... -class CustomStrSubWrapper(cfg.ConfigOpts.StrSubWrapper): - def __getitem__(self, key): - value = super(CustomStrSubWrapper, self).__getitem__(key) - if value is None: - return '' - return value - -cfg.ConfigOpts.StrSubWrapper = CustomStrSubWrapper - - -_STORAGE_OPTS = list(itertools.chain(gnocchi.storage.OPTS, - gnocchi.storage.ceph.OPTS, - gnocchi.storage.file.OPTS, - gnocchi.storage.swift.OPTS, - gnocchi.storage.redis.OPTS, - gnocchi.storage.s3.OPTS)) - - -_INCOMING_OPTS = copy.deepcopy(_STORAGE_OPTS) -for opt in _INCOMING_OPTS: - opt.default = '${storage.%s}' % opt.name - - -def list_opts(): - return [ - ("indexer", gnocchi.indexer.OPTS), - ("metricd", ( - cfg.IntOpt('workers', min=1, - required=True, - help='Number of workers for Gnocchi metric daemons. 
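Stepping back to gnocchi/json.py above: to_primitive() is what gives the REST layer its uniform rendering of UUIDs, datetimes and numpy timestamps. A short interactive illustration (outputs shown for these exact inputs):

    >>> import datetime, uuid
    >>> import numpy
    >>> to_primitive(uuid.UUID("5e3fcbe2-7aab-475d-b42c-a440aa42e5ad"))
    '5e3fcbe2-7aab-475d-b42c-a440aa42e5ad'
    >>> to_primitive(datetime.datetime(2017, 6, 5, 17, 4, 30))
    '2017-06-05T17:04:30'
    >>> to_primitive(numpy.datetime64("2017-06-05T17:04:30.000000000"))
    '2017-06-05T17:04:30+00:00'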
' - 'By default the available number of CPU is used.'), - cfg.IntOpt('metric_processing_delay', - default=60, - required=True, - deprecated_group='storage', - help="How many seconds to wait between " - "scheduling new metrics to process"), - cfg.IntOpt('metric_reporting_delay', - deprecated_group='storage', - default=120, - min=-1, - required=True, - help="How many seconds to wait between " - "metric ingestion reporting. Set value to -1 to " - "disable reporting"), - cfg.IntOpt('metric_cleanup_delay', - deprecated_group='storage', - default=300, - required=True, - help="How many seconds to wait between " - "cleaning of expired data"), - cfg.IntOpt('worker_sync_rate', - default=30, - help="Frequency to detect when metricd workers join or " - "leave system (in seconds). A shorter rate, may " - "improve rebalancing but create more coordination " - "load"), - cfg.IntOpt('processing_replicas', - default=3, - min=1, - help="Number of workers that share a task. A higher " - "value may improve worker utilization but may also " - "increase load on coordination backend. Value is " - "capped by number of workers globally."), - )), - ("api", ( - cfg.StrOpt('paste_config', - default="api-paste.ini", - help='Path to API Paste configuration.'), - cfg.StrOpt('auth_mode', - default="basic", - choices=list(map(operator.attrgetter("name"), - pkg_resources.iter_entry_points( - "gnocchi.rest.auth_helper"))), - help='Authentication mode to use.'), - cfg.IntOpt('max_limit', - default=1000, - required=True, - help=('The maximum number of items returned in a ' - 'single response from a collection resource')), - cfg.IntOpt('refresh_timeout', - default=10, min=0, - help='Number of seconds before timeout when attempting ' - 'to force refresh of metric.'), - )), - ("storage", (_STORAGE_OPTS + gnocchi.storage._carbonara.OPTS)), - ("incoming", _INCOMING_OPTS), - ("statsd", ( - cfg.HostAddressOpt('host', - default='0.0.0.0', - help='The listen IP for statsd'), - cfg.PortOpt('port', - default=8125, - help='The port for statsd'), - cfg.Opt( - 'resource_id', - type=uuid.UUID, - help='Resource UUID to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'user_id', - deprecated_for_removal=True, - help='User ID to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'project_id', - deprecated_for_removal=True, - help='Project ID to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'creator', - default="${statsd.user_id}:${statsd.project_id}", - help='Creator value to use to identify statsd in Gnocchi'), - cfg.StrOpt( - 'archive_policy_name', - help='Archive policy name to use when creating metrics'), - cfg.FloatOpt( - 'flush_delay', - default=10, - help='Delay between flushes'), - )), - ("archive_policy", gnocchi.archive_policy.OPTS), - ] - - -def set_defaults(): - cfg.set_defaults(cors.CORS_OPTS, - allow_headers=[ - 'X-Auth-Token', - 'X-Subject-Token', - 'X-User-Id', - 'X-Domain-Id', - 'X-Project-Id', - 'X-Roles']) diff --git a/gnocchi/resource_type.py b/gnocchi/resource_type.py deleted file mode 100644 index 73b755648..000000000 --- a/gnocchi/resource_type.py +++ /dev/null @@ -1,266 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
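Two details of opts.py above are easy to miss. First, the [incoming] option group is a deep copy of the storage options whose defaults are rewritten to '${storage.<name>}', so the incoming driver silently follows the [storage] section unless explicitly overridden. Second, CustomStrSubWrapper maps None to '' because plain %-interpolation would otherwise render unset options as the string "None"; a self-contained illustration of the behaviour it restores (plain Python, not oslo.config itself):

    opts = {"user_id": None, "project_id": None}
    # substitute '' for None, as CustomStrSubWrapper.__getitem__ does
    safe = {k: ("" if v is None else v) for k, v in opts.items()}
    # the statsd creator default "${statsd.user_id}:${statsd.project_id}"
    # thus interpolates to ":" rather than "None:None"
    assert "%(user_id)s:%(project_id)s" % safe == ":"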
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import numbers -import re -import six -import stevedore -import voluptuous - -from gnocchi import utils - - -INVALID_NAMES = [ - "id", "type", "metrics", - "revision", "revision_start", "revision_end", - "started_at", "ended_at", - "user_id", "project_id", - "created_by_user_id", "created_by_project_id", "get_metric", - "creator", -] - -VALID_CHARS = re.compile("[a-zA-Z0-9][a-zA-Z0-9_]*") - - -class InvalidResourceAttribute(ValueError): - pass - - -class InvalidResourceAttributeName(InvalidResourceAttribute): - """Error raised when the resource attribute name is invalid.""" - def __init__(self, name): - super(InvalidResourceAttributeName, self).__init__( - "Resource attribute name %s is invalid" % str(name)) - self.name = name - - -class InvalidResourceAttributeValue(InvalidResourceAttribute): - """Error raised when the resource attribute min is greater than max""" - def __init__(self, min, max): - super(InvalidResourceAttributeValue, self).__init__( - "Resource attribute value min (or min_length) %s must be less " - "than or equal to max (or max_length) %s!" % (str(min), str(max))) - self.min = min - self.max = max - - -class InvalidResourceAttributeOption(InvalidResourceAttribute): - """Error raised when the resource attribute name is invalid.""" - def __init__(self, name, option, reason): - super(InvalidResourceAttributeOption, self).__init__( - "Option '%s' of resource attribute %s is invalid: %s" % - (option, str(name), str(reason))) - self.name = name - self.option = option - self.reason = reason - - -# NOTE(sileht): This is to store the behavior of some operations: -# * fill, to set a default value to all existing resource type -# -# in the future for example, we can allow to change the length of -# a string attribute, if the new one is shorter, we can add a option -# to define the behavior like: -# * resize = trunc or reject -OperationOptions = { - voluptuous.Optional('fill'): object -} - - -class CommonAttributeSchema(object): - meta_schema_ext = {} - schema_ext = None - - def __init__(self, type, name, required, options=None): - if (len(name) > 63 or name in INVALID_NAMES - or not VALID_CHARS.match(name)): - raise InvalidResourceAttributeName(name) - - self.name = name - self.required = required - self.fill = None - - # options is set only when we update a resource type - if options is not None: - fill = options.get("fill") - if fill is None and required: - raise InvalidResourceAttributeOption( - name, "fill", "must not be empty if required=True") - elif fill is not None: - # Ensure fill have the correct attribute type - try: - self.fill = voluptuous.Schema(self.schema_ext)(fill) - except voluptuous.Error as e: - raise InvalidResourceAttributeOption(name, "fill", e) - - @classmethod - def meta_schema(cls, for_update=False): - d = { - voluptuous.Required('type'): cls.typename, - voluptuous.Required('required', default=True): bool - } - if for_update: - d[voluptuous.Required('options', default={})] = OperationOptions - if callable(cls.meta_schema_ext): - d.update(cls.meta_schema_ext()) - else: - d.update(cls.meta_schema_ext) - return d - - def schema(self): - if 
self.required: - return {self.name: self.schema_ext} - else: - return {voluptuous.Optional(self.name): self.schema_ext} - - def jsonify(self): - return {"type": self.typename, - "required": self.required} - - -class StringSchema(CommonAttributeSchema): - typename = "string" - - def __init__(self, min_length, max_length, *args, **kwargs): - if min_length > max_length: - raise InvalidResourceAttributeValue(min_length, max_length) - - self.min_length = min_length - self.max_length = max_length - super(StringSchema, self).__init__(*args, **kwargs) - - meta_schema_ext = { - voluptuous.Required('min_length', default=0): - voluptuous.All(int, voluptuous.Range(min=0, max=255)), - voluptuous.Required('max_length', default=255): - voluptuous.All(int, voluptuous.Range(min=1, max=255)) - } - - @property - def schema_ext(self): - return voluptuous.All(six.text_type, - voluptuous.Length( - min=self.min_length, - max=self.max_length)) - - def jsonify(self): - d = super(StringSchema, self).jsonify() - d.update({"max_length": self.max_length, - "min_length": self.min_length}) - return d - - -class UUIDSchema(CommonAttributeSchema): - typename = "uuid" - schema_ext = staticmethod(utils.UUID) - - -class NumberSchema(CommonAttributeSchema): - typename = "number" - - def __init__(self, min, max, *args, **kwargs): - if max is not None and min is not None and min > max: - raise InvalidResourceAttributeValue(min, max) - self.min = min - self.max = max - super(NumberSchema, self).__init__(*args, **kwargs) - - meta_schema_ext = { - voluptuous.Required('min', default=None): voluptuous.Any( - None, numbers.Real), - voluptuous.Required('max', default=None): voluptuous.Any( - None, numbers.Real) - } - - @property - def schema_ext(self): - return voluptuous.All(numbers.Real, - voluptuous.Range(min=self.min, - max=self.max)) - - def jsonify(self): - d = super(NumberSchema, self).jsonify() - d.update({"min": self.min, "max": self.max}) - return d - - -class BoolSchema(CommonAttributeSchema): - typename = "bool" - schema_ext = bool - - -class ResourceTypeAttributes(list): - def jsonify(self): - d = {} - for attr in self: - d[attr.name] = attr.jsonify() - return d - - -class ResourceTypeSchemaManager(stevedore.ExtensionManager): - def __init__(self, *args, **kwargs): - super(ResourceTypeSchemaManager, self).__init__(*args, **kwargs) - type_schemas = tuple([ext.plugin.meta_schema() - for ext in self.extensions]) - self._schema = voluptuous.Schema({ - "name": six.text_type, - voluptuous.Required("attributes", default={}): { - six.text_type: voluptuous.Any(*tuple(type_schemas)) - } - }) - - type_schemas = tuple([ext.plugin.meta_schema(for_update=True) - for ext in self.extensions]) - self._schema_for_update = voluptuous.Schema({ - "name": six.text_type, - voluptuous.Required("attributes", default={}): { - six.text_type: voluptuous.Any(*tuple(type_schemas)) - } - }) - - def __call__(self, definition): - return self._schema(definition) - - def for_update(self, definition): - return self._schema_for_update(definition) - - def attributes_from_dict(self, attributes): - return ResourceTypeAttributes( - self[attr["type"]].plugin(name=name, **attr) - for name, attr in attributes.items()) - - def resource_type_from_dict(self, name, attributes, state): - return ResourceType(name, self.attributes_from_dict(attributes), state) - - -class ResourceType(object): - def __init__(self, name, attributes, state): - self.name = name - self.attributes = attributes - self.state = state - - @property - def schema(self): - schema = {} - for attr in 
self.attributes: - schema.update(attr.schema()) - return schema - - def __eq__(self, other): - return self.name == other.name - - def jsonify(self): - return {"name": self.name, - "attributes": self.attributes.jsonify(), - "state": self.state} diff --git a/gnocchi/rest/__init__.py b/gnocchi/rest/__init__.py deleted file mode 100644 index 42e9bc413..000000000 --- a/gnocchi/rest/__init__.py +++ /dev/null @@ -1,1785 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import functools -import itertools -import uuid - -import jsonpatch -import pecan -from pecan import rest -import pyparsing -import six -from six.moves.urllib import parse as urllib_parse -from stevedore import extension -import voluptuous -import webob.exc -import werkzeug.http - -from gnocchi import aggregates -from gnocchi import archive_policy -from gnocchi import indexer -from gnocchi import json -from gnocchi import resource_type -from gnocchi import storage -from gnocchi.storage import incoming -from gnocchi import utils - - -def arg_to_list(value): - if isinstance(value, list): - return value - elif value: - return [value] - return [] - - -def abort(status_code, detail='', headers=None, comment=None, **kw): - """Like pecan.abort, but make sure detail is a string.""" - if status_code == 404 and not detail: - raise RuntimeError("http code 404 must have 'detail' set") - if isinstance(detail, Exception): - detail = six.text_type(detail) - return pecan.abort(status_code, detail, headers, comment, **kw) - - -def flatten_dict_to_keypairs(d, separator=':'): - """Generator that produces sequence of keypairs for nested dictionaries. - - :param d: dictionaries which may be nested - :param separator: symbol between names - """ - for name, value in sorted(six.iteritems(d)): - if isinstance(value, dict): - for subname, subvalue in flatten_dict_to_keypairs(value, - separator): - yield ('%s%s%s' % (name, separator, subname), subvalue) - else: - yield name, value - - -def enforce(rule, target): - """Return the user and project the request should be limited to. - - :param rule: The rule name - :param target: The target to enforce on. 
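The flattening matters because policy rules match on dotted key paths rather than nested structures; for example (key names illustrative):

    >>> dict(flatten_dict_to_keypairs(
    ...     {"resource": {"id": "boo", "project_id": "p1"}}, separator='.'))
    {'resource.id': 'boo', 'resource.project_id': 'p1'}

so a rule in policy.json can refer to a target attribute such as resource.project_id directly.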
- - """ - creds = pecan.request.auth_helper.get_auth_info(pecan.request.headers) - - if not isinstance(target, dict): - if hasattr(target, "jsonify"): - target = target.jsonify() - else: - target = target.__dict__ - - # Flatten dict - target = dict(flatten_dict_to_keypairs(d=target, separator='.')) - - if not pecan.request.policy_enforcer.enforce(rule, target, creds): - abort(403) - - -def set_resp_location_hdr(location): - location = '%s%s' % (pecan.request.script_name, location) - # NOTE(sileht): according the pep-3333 the headers must be - # str in py2 and py3 even this is not the same thing in both - # version - # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues - if six.PY2 and isinstance(location, six.text_type): - location = location.encode('utf-8') - location = urllib_parse.quote(location) - pecan.response.headers['Location'] = location - - -def deserialize(expected_content_types=None): - if expected_content_types is None: - expected_content_types = ("application/json", ) - - mime_type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Content-Type')) - if mime_type not in expected_content_types: - abort(415) - try: - params = json.load(pecan.request.body_file) - except Exception as e: - abort(400, "Unable to decode body: " + six.text_type(e)) - return params - - -def deserialize_and_validate(schema, required=True, - expected_content_types=None): - try: - return voluptuous.Schema(schema, required=required)( - deserialize(expected_content_types=expected_content_types)) - except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) - - -def PositiveOrNullInt(value): - value = int(value) - if value < 0: - raise ValueError("Value must be positive") - return value - - -def PositiveNotNullInt(value): - value = int(value) - if value <= 0: - raise ValueError("Value must be positive and not null") - return value - - -def Timespan(value): - return utils.to_timespan(value).total_seconds() - - -def get_header_option(name, params): - type, options = werkzeug.http.parse_options_header( - pecan.request.headers.get('Accept')) - return strtobool('Accept header' if name in options else name, - options.get(name, params.pop(name, 'false'))) - - -def get_history(params): - return get_header_option('history', params) - - -def get_details(params): - return get_header_option('details', params) - - -def strtobool(varname, v): - """Convert a string to a boolean. - - Default to false if unable to convert. 
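Given get_header_option() above, a client can request the details/history variants either through the query string or through an Accept header option; both of these requests are equivalent (hypothetical client calls, URL illustrative):

    import requests

    url = "http://localhost:8041/v1/resource/generic"
    # 1) as a query parameter
    requests.get(url, params={"details": "true"})
    # 2) as an option on the Accept header
    requests.get(url, headers={"Accept": "application/json; details=true"})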
- """ - try: - return utils.strtobool(v) - except ValueError as e: - abort(400, "Unable to parse `%s': %s" % (varname, six.text_type(e))) - - -RESOURCE_DEFAULT_PAGINATION = ['revision_start:asc', - 'started_at:asc'] - -METRIC_DEFAULT_PAGINATION = ['id:asc'] - - -def get_pagination_options(params, default): - max_limit = pecan.request.conf.api.max_limit - limit = params.pop('limit', max_limit) - marker = params.pop('marker', None) - sorts = params.pop('sort', default) - if not isinstance(sorts, list): - sorts = [sorts] - - try: - limit = PositiveNotNullInt(limit) - except ValueError: - abort(400, "Invalid 'limit' value: %s" % params.get('limit')) - - limit = min(limit, max_limit) - - return {'limit': limit, - 'marker': marker, - 'sorts': sorts} - - -def ValidAggMethod(value): - value = six.text_type(value) - if value in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS_VALUES: - return value - raise ValueError("Invalid aggregation method") - - -class ArchivePolicyController(rest.RestController): - def __init__(self, archive_policy): - self.archive_policy = archive_policy - - @pecan.expose('json') - def get(self): - ap = pecan.request.indexer.get_archive_policy(self.archive_policy) - if ap: - enforce("get archive policy", ap) - return ap - abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) - - @pecan.expose('json') - def patch(self): - ap = pecan.request.indexer.get_archive_policy(self.archive_policy) - if not ap: - abort(404, indexer.NoSuchArchivePolicy(self.archive_policy)) - enforce("update archive policy", ap) - - body = deserialize_and_validate(voluptuous.Schema({ - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": PositiveNotNullInt, - "timespan": Timespan}], voluptuous.Length(min=1)), - })) - # Validate the data - try: - ap_items = [archive_policy.ArchivePolicyItem(**item) for item in - body['definition']] - except ValueError as e: - abort(400, e) - - try: - return pecan.request.indexer.update_archive_policy( - self.archive_policy, ap_items) - except indexer.UnsupportedArchivePolicyChange as e: - abort(400, e) - - @pecan.expose() - def delete(self): - # NOTE(jd) I don't think there's any point in fetching and passing the - # archive policy here, as the rule is probably checking the actual role - # of the user, not the content of the AP. 
- enforce("delete archive policy", {}) - try: - pecan.request.indexer.delete_archive_policy(self.archive_policy) - except indexer.NoSuchArchivePolicy as e: - abort(404, e) - except indexer.ArchivePolicyInUse as e: - abort(400, e) - - -class ArchivePoliciesController(rest.RestController): - @pecan.expose() - def _lookup(self, archive_policy, *remainder): - return ArchivePolicyController(archive_policy), remainder - - @pecan.expose('json') - def post(self): - # NOTE(jd): Initialize this one at run-time because we rely on conf - conf = pecan.request.conf - enforce("create archive policy", {}) - ArchivePolicySchema = voluptuous.Schema({ - voluptuous.Required("name"): six.text_type, - voluptuous.Required("back_window", default=0): PositiveOrNullInt, - voluptuous.Required( - "aggregation_methods", - default=set(conf.archive_policy.default_aggregation_methods)): - [ValidAggMethod], - voluptuous.Required("definition"): - voluptuous.All([{ - "granularity": Timespan, - "points": PositiveNotNullInt, - "timespan": Timespan, - }], voluptuous.Length(min=1)), - }) - - body = deserialize_and_validate(ArchivePolicySchema) - # Validate the data - try: - ap = archive_policy.ArchivePolicy.from_dict(body) - except ValueError as e: - abort(400, e) - enforce("create archive policy", ap) - try: - ap = pecan.request.indexer.create_archive_policy(ap) - except indexer.ArchivePolicyAlreadyExists as e: - abort(409, e) - - location = "/archive_policy/" + ap.name - set_resp_location_hdr(location) - pecan.response.status = 201 - return ap - - @pecan.expose('json') - def get_all(self): - enforce("list archive policy", {}) - return pecan.request.indexer.list_archive_policies() - - -class ArchivePolicyRulesController(rest.RestController): - @pecan.expose('json') - def post(self): - enforce("create archive policy rule", {}) - ArchivePolicyRuleSchema = voluptuous.Schema({ - voluptuous.Required("name"): six.text_type, - voluptuous.Required("metric_pattern"): six.text_type, - voluptuous.Required("archive_policy_name"): six.text_type, - }) - - body = deserialize_and_validate(ArchivePolicyRuleSchema) - enforce("create archive policy rule", body) - try: - ap = pecan.request.indexer.create_archive_policy_rule( - body['name'], body['metric_pattern'], - body['archive_policy_name'] - ) - except indexer.ArchivePolicyRuleAlreadyExists as e: - abort(409, e) - - location = "/archive_policy_rule/" + ap.name - set_resp_location_hdr(location) - pecan.response.status = 201 - return ap - - @pecan.expose('json') - def get_one(self, name): - ap = pecan.request.indexer.get_archive_policy_rule(name) - if ap: - enforce("get archive policy rule", ap) - return ap - abort(404, indexer.NoSuchArchivePolicyRule(name)) - - @pecan.expose('json') - def get_all(self): - enforce("list archive policy rule", {}) - return pecan.request.indexer.list_archive_policy_rules() - - @pecan.expose() - def delete(self, name): - # NOTE(jd) I don't think there's any point in fetching and passing the - # archive policy rule here, as the rule is probably checking the actual - # role of the user, not the content of the AP rule. 
- enforce("delete archive policy rule", {}) - try: - pecan.request.indexer.delete_archive_policy_rule(name) - except indexer.NoSuchArchivePolicyRule as e: - abort(404, e) - except indexer.ArchivePolicyRuleInUse as e: - abort(400, e) - - -def MeasuresListSchema(measures): - try: - times = utils.to_timestamps((m['timestamp'] for m in measures)) - except TypeError: - abort(400, "Invalid format for measures") - except ValueError as e: - abort(400, "Invalid input for timestamp: %s" % e) - - try: - values = [float(i['value']) for i in measures] - except Exception: - abort(400, "Invalid input for a value") - - return (storage.Measure(t, v) for t, v in six.moves.zip( - times.tolist(), values)) - - -class MetricController(rest.RestController): - _custom_actions = { - 'measures': ['POST', 'GET'] - } - - def __init__(self, metric): - self.metric = metric - mgr = extension.ExtensionManager(namespace='gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in mgr) - - def enforce_metric(self, rule): - enforce(rule, json.to_primitive(self.metric)) - - @pecan.expose('json') - def get_all(self): - self.enforce_metric("get metric") - return self.metric - - @pecan.expose() - def post_measures(self): - self.enforce_metric("post measures") - params = deserialize() - if not isinstance(params, list): - abort(400, "Invalid input for measures") - if params: - pecan.request.storage.incoming.add_measures( - self.metric, MeasuresListSchema(params)) - pecan.response.status = 202 - - @pecan.expose('json') - def get_measures(self, start=None, stop=None, aggregation='mean', - granularity=None, resample=None, refresh=False, **param): - self.enforce_metric("get measures") - if not (aggregation - in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS - or aggregation in self.custom_agg): - msg = '''Invalid aggregation value %(agg)s, must be one of %(std)s - or %(custom)s''' - abort(400, msg % dict( - agg=aggregation, - std=archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, - custom=str(self.custom_agg.keys()))) - - if start is not None: - try: - start = utils.to_datetime(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_datetime(stop) - except Exception: - abort(400, "Invalid value for stop") - - if resample: - if not granularity: - abort(400, 'A granularity must be specified to resample') - try: - resample = Timespan(resample) - except ValueError as e: - abort(400, e) - - if (strtobool("refresh", refresh) and - pecan.request.storage.incoming.has_unprocessed(self.metric)): - try: - pecan.request.storage.refresh_metric( - pecan.request.indexer, self.metric, - pecan.request.conf.api.refresh_timeout) - except storage.SackLockTimeoutError as e: - abort(503, e) - try: - if aggregation in self.custom_agg: - measures = self.custom_agg[aggregation].compute( - pecan.request.storage, self.metric, - start, stop, **param) - else: - measures = pecan.request.storage.get_measures( - self.metric, start, stop, aggregation, - Timespan(granularity) if granularity is not None else None, - resample) - # Replace timestamp keys by their string versions - return [(timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] - except (storage.MetricDoesNotExist, - storage.GranularityDoesNotExist, - storage.AggregationDoesNotExist) as e: - abort(404, e) - except aggregates.CustomAggFailure as e: - abort(400, e) - - @pecan.expose() - def delete(self): - self.enforce_metric("delete metric") - try: - 
pecan.request.indexer.delete_metric(self.metric.id) - except indexer.NoSuchMetric as e: - abort(404, e) - - -class MetricsController(rest.RestController): - - @pecan.expose() - def _lookup(self, id, *remainder): - try: - metric_id = uuid.UUID(id) - except ValueError: - abort(404, indexer.NoSuchMetric(id)) - metrics = pecan.request.indexer.list_metrics( - id=metric_id, details=True) - if not metrics: - abort(404, indexer.NoSuchMetric(id)) - return MetricController(metrics[0]), remainder - - _MetricSchema = voluptuous.Schema({ - "archive_policy_name": six.text_type, - "name": six.text_type, - voluptuous.Optional("unit"): - voluptuous.All(six.text_type, voluptuous.Length(max=31)), - }) - - # NOTE(jd) Define this method as it was a voluptuous schema – it's just a - # smarter version of a voluptuous schema, no? - @classmethod - def MetricSchema(cls, definition): - # First basic validation - definition = cls._MetricSchema(definition) - archive_policy_name = definition.get('archive_policy_name') - - name = definition.get('name') - if name and '/' in name: - abort(400, "'/' is not supported in metric name") - if archive_policy_name is None: - try: - ap = pecan.request.indexer.get_archive_policy_for_metric(name) - except indexer.NoArchivePolicyRuleMatch: - # NOTE(jd) Since this is a schema-like function, we - # should/could raise ValueError, but if we do so, voluptuous - # just returns a "invalid value" with no useful message – so we - # prefer to use abort() to make sure the user has the right - # error message - abort(400, "No archive policy name specified " - "and no archive policy rule found matching " - "the metric name %s" % name) - else: - definition['archive_policy_name'] = ap.name - - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - - enforce("create metric", { - "creator": creator, - "archive_policy_name": archive_policy_name, - "name": name, - "unit": definition.get('unit'), - }) - - return definition - - @pecan.expose('json') - def post(self): - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - body = deserialize_and_validate(self.MetricSchema) - try: - m = pecan.request.indexer.create_metric( - uuid.uuid4(), - creator, - name=body.get('name'), - unit=body.get('unit'), - archive_policy_name=body['archive_policy_name']) - except indexer.NoSuchArchivePolicy as e: - abort(400, e) - set_resp_location_hdr("/metric/" + str(m.id)) - pecan.response.status = 201 - return m - - MetricListSchema = voluptuous.Schema({ - "user_id": six.text_type, - "project_id": six.text_type, - "creator": six.text_type, - "limit": six.text_type, - "name": six.text_type, - "id": six.text_type, - "unit": six.text_type, - "archive_policy_name": six.text_type, - "status": voluptuous.Any("active", "delete"), - "sort": voluptuous.Any([six.text_type], six.text_type), - "marker": six.text_type, - }) - - @classmethod - @pecan.expose('json') - def get_all(cls, **kwargs): - kwargs = cls.MetricListSchema(kwargs) - - # Compat with old user/project API - provided_user_id = kwargs.pop('user_id', None) - provided_project_id = kwargs.pop('project_id', None) - if provided_user_id is None and provided_project_id is None: - provided_creator = kwargs.pop('creator', None) - else: - provided_creator = ( - (provided_user_id or "") - + ":" - + (provided_project_id or "") - ) - try: - enforce("list all metric", {}) - except webob.exc.HTTPForbidden: - enforce("list metric", {}) - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - if provided_creator 
and creator != provided_creator: - abort(403, "Insufficient privileges to filter by user/project") - attr_filter = {} - if provided_creator is not None: - attr_filter['creator'] = provided_creator - attr_filter.update(get_pagination_options( - kwargs, METRIC_DEFAULT_PAGINATION)) - attr_filter.update(kwargs) - try: - return pecan.request.indexer.list_metrics(**attr_filter) - except indexer.IndexerException as e: - abort(400, e) - - -_MetricsSchema = voluptuous.Schema({ - six.text_type: voluptuous.Any(utils.UUID, - MetricsController.MetricSchema), -}) - - -def MetricsSchema(data): - # NOTE(jd) Before doing any kind of validation, copy the metric name - # into the metric definition. This is required so we have the name - # available when doing the metric validation with its own MetricSchema, - # and so we can do things such as applying archive policy rules. - if isinstance(data, dict): - for metric_name, metric_def in six.iteritems(data): - if isinstance(metric_def, dict): - metric_def['name'] = metric_name - return _MetricsSchema(data) - - -class NamedMetricController(rest.RestController): - def __init__(self, resource_id, resource_type): - self.resource_id = resource_id - self.resource_type = resource_type - - @pecan.expose() - def _lookup(self, name, *remainder): - details = True if pecan.request.method == 'GET' else False - m = pecan.request.indexer.list_metrics(details=details, - name=name, - resource_id=self.resource_id) - if m: - return MetricController(m[0]), remainder - - resource = pecan.request.indexer.get_resource(self.resource_type, - self.resource_id) - if resource: - abort(404, indexer.NoSuchMetric(name)) - else: - abort(404, indexer.NoSuchResource(self.resource_id)) - - @pecan.expose() - def post(self): - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - enforce("update resource", resource) - metrics = deserialize_and_validate(MetricsSchema) - try: - pecan.request.indexer.update_resource( - self.resource_type, self.resource_id, metrics=metrics, - append_metrics=True, - create_revision=False) - except (indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy, - ValueError) as e: - abort(400, e) - except indexer.NamedMetricAlreadyExists as e: - abort(409, e) - except indexer.NoSuchResource as e: - abort(404, e) - - @pecan.expose('json') - def get_all(self): - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - enforce("get resource", resource) - return pecan.request.indexer.list_metrics(resource_id=self.resource_id) - - -class ResourceHistoryController(rest.RestController): - def __init__(self, resource_id, resource_type): - self.resource_id = resource_id - self.resource_type = resource_type - - @pecan.expose('json') - def get(self, **kwargs): - details = get_details(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - - resource = pecan.request.indexer.get_resource( - self.resource_type, self.resource_id) - if not resource: - abort(404, indexer.NoSuchResource(self.resource_id)) - - enforce("get resource", resource) - - try: - # FIXME(sileht): next API version should returns - # {'resources': [...], 'links': [ ... 
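For each metric name, the MetricsSchema above accepts either an inline definition or the UUID of an already-existing metric, so a resource's metrics mapping can mix both forms (identifiers illustrative):

    metrics = {
        "cpu.util": {"archive_policy_name": "low"},           # create a new metric
        "disk.iops": "8db7e824-5d63-4754-b7a6-7af6a1f0f6a8",  # attach an existing one
    }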
pagination rel ...]} - return pecan.request.indexer.list_resources( - self.resource_type, - attribute_filter={"=": {"id": self.resource_id}}, - details=details, - history=True, - **pagination_opts - ) - except indexer.IndexerException as e: - abort(400, e) - - -def etag_precondition_check(obj): - etag, lastmodified = obj.etag, obj.lastmodified - # NOTE(sileht): Checks and order come from rfc7232 - # in webob, the '*' and the absent of the header is handled by - # if_match.__contains__() and if_none_match.__contains__() - # and are identique... - if etag not in pecan.request.if_match: - abort(412) - elif (not pecan.request.environ.get("HTTP_IF_MATCH") - and pecan.request.if_unmodified_since - and pecan.request.if_unmodified_since < lastmodified): - abort(412) - - if etag in pecan.request.if_none_match: - if pecan.request.method in ['GET', 'HEAD']: - abort(304) - else: - abort(412) - elif (not pecan.request.environ.get("HTTP_IF_NONE_MATCH") - and pecan.request.if_modified_since - and (pecan.request.if_modified_since >= - lastmodified) - and pecan.request.method in ['GET', 'HEAD']): - abort(304) - - -def etag_set_headers(obj): - pecan.response.etag = obj.etag - pecan.response.last_modified = obj.lastmodified - - -def AttributesPath(value): - if value.startswith("/attributes"): - return value - raise ValueError("Only attributes can be modified") - - -ResourceTypeJsonPatchSchema = voluptuous.Schema([{ - "op": voluptuous.Any("add", "remove"), - "path": AttributesPath, - voluptuous.Optional("value"): dict, -}]) - - -class ResourceTypeController(rest.RestController): - def __init__(self, name): - self._name = name - - @pecan.expose('json') - def get(self): - try: - rt = pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("get resource type", rt) - return rt - - @pecan.expose('json') - def patch(self): - # NOTE(sileht): should we check for "application/json-patch+json" - # Content-Type ? 
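The body handled here is a standard RFC 6902 JSON patch, restricted by ResourceTypeJsonPatchSchema above to /attributes paths and sent with the application/json-patch+json content type; for example (attribute names hypothetical):

    patch = [
        {"op": "add",
         "path": "/attributes/my_field",
         "value": {"type": "string", "required": False,
                   "min_length": 0, "max_length": 255}},
        {"op": "remove", "path": "/attributes/old_field"},
    ]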
- - try: - rt = pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("update resource type", rt) - - # Ensure this is a valid jsonpatch dict - patch = deserialize_and_validate( - ResourceTypeJsonPatchSchema, - expected_content_types=["application/json-patch+json"]) - - # Add new attributes to the resource type - rt_json_current = rt.jsonify() - try: - rt_json_next = jsonpatch.apply_patch(rt_json_current, patch) - except jsonpatch.JsonPatchException as e: - abort(400, e) - del rt_json_next['state'] - - # Validate that the whole new resource_type is valid - schema = pecan.request.indexer.get_resource_type_schema() - try: - rt_json_next = voluptuous.Schema(schema.for_update, required=True)( - rt_json_next) - except voluptuous.Error as e: - abort(400, "Invalid input: %s" % e) - - # Get only newly formatted and deleted attributes - add_attrs = {k: v for k, v in rt_json_next["attributes"].items() - if k not in rt_json_current["attributes"]} - del_attrs = [k for k in rt_json_current["attributes"] - if k not in rt_json_next["attributes"]] - - if not add_attrs and not del_attrs: - # NOTE(sileht): just returns the resource, the asked changes - # just do nothing - return rt - - try: - add_attrs = schema.attributes_from_dict(add_attrs) - except resource_type.InvalidResourceAttribute as e: - abort(400, "Invalid input: %s" % e) - - try: - return pecan.request.indexer.update_resource_type( - self._name, add_attributes=add_attrs, - del_attributes=del_attrs) - except indexer.NoSuchResourceType as e: - abort(400, e) - - @pecan.expose() - def delete(self): - try: - pecan.request.indexer.get_resource_type(self._name) - except indexer.NoSuchResourceType as e: - abort(404, e) - enforce("delete resource type", resource_type) - try: - pecan.request.indexer.delete_resource_type(self._name) - except (indexer.NoSuchResourceType, - indexer.ResourceTypeInUse) as e: - abort(400, e) - - -class ResourceTypesController(rest.RestController): - - @pecan.expose() - def _lookup(self, name, *remainder): - return ResourceTypeController(name), remainder - - @pecan.expose('json') - def post(self): - schema = pecan.request.indexer.get_resource_type_schema() - body = deserialize_and_validate(schema) - body["state"] = "creating" - - try: - rt = schema.resource_type_from_dict(**body) - except resource_type.InvalidResourceAttribute as e: - abort(400, "Invalid input: %s" % e) - - enforce("create resource type", body) - try: - rt = pecan.request.indexer.create_resource_type(rt) - except indexer.ResourceTypeAlreadyExists as e: - abort(409, e) - set_resp_location_hdr("/resource_type/" + rt.name) - pecan.response.status = 201 - return rt - - @pecan.expose('json') - def get_all(self, **kwargs): - enforce("list resource type", {}) - try: - return pecan.request.indexer.list_resource_types() - except indexer.IndexerException as e: - abort(400, e) - - -def ResourceSchema(schema): - base_schema = { - voluptuous.Optional('started_at'): utils.to_datetime, - voluptuous.Optional('ended_at'): utils.to_datetime, - voluptuous.Optional('user_id'): voluptuous.Any(None, six.text_type), - voluptuous.Optional('project_id'): voluptuous.Any(None, six.text_type), - voluptuous.Optional('metrics'): MetricsSchema, - } - base_schema.update(schema) - return base_schema - - -class ResourceController(rest.RestController): - - def __init__(self, resource_type, id): - self._resource_type = resource_type - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - try: - self.id = 
utils.ResourceUUID(id, creator) - except ValueError: - abort(404, indexer.NoSuchResource(id)) - self.metric = NamedMetricController(str(self.id), self._resource_type) - self.history = ResourceHistoryController(str(self.id), - self._resource_type) - - @pecan.expose('json') - def get(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id, with_metrics=True) - if resource: - enforce("get resource", resource) - etag_precondition_check(resource) - etag_set_headers(resource) - return resource - abort(404, indexer.NoSuchResource(self.id)) - - @pecan.expose('json') - def patch(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id, with_metrics=True) - if not resource: - abort(404, indexer.NoSuchResource(self.id)) - enforce("update resource", resource) - etag_precondition_check(resource) - - body = deserialize_and_validate( - schema_for(self._resource_type), - required=False) - - if len(body) == 0: - etag_set_headers(resource) - return resource - - for k, v in six.iteritems(body): - if k != 'metrics' and getattr(resource, k) != v: - create_revision = True - break - else: - if 'metrics' not in body: - # No need to go further, we assume the db resource - # doesn't change between the get and update - return resource - create_revision = False - - try: - resource = pecan.request.indexer.update_resource( - self._resource_type, - self.id, - create_revision=create_revision, - **body) - except (indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy, - ValueError) as e: - abort(400, e) - except indexer.NoSuchResource as e: - abort(404, e) - etag_set_headers(resource) - return resource - - @pecan.expose() - def delete(self): - resource = pecan.request.indexer.get_resource( - self._resource_type, self.id) - if not resource: - abort(404, indexer.NoSuchResource(self.id)) - enforce("delete resource", resource) - etag_precondition_check(resource) - try: - pecan.request.indexer.delete_resource(self.id) - except indexer.NoSuchResource as e: - abort(404, e) - - -def schema_for(resource_type): - resource_type = pecan.request.indexer.get_resource_type(resource_type) - return ResourceSchema(resource_type.schema) - - -def ResourceUUID(value, creator): - try: - return utils.ResourceUUID(value, creator) - except ValueError as e: - raise voluptuous.Invalid(e) - - -def ResourceID(value, creator): - return (six.text_type(value), ResourceUUID(value, creator)) - - -class ResourcesController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type - - @pecan.expose() - def _lookup(self, id, *remainder): - return ResourceController(self._resource_type, id), remainder - - @pecan.expose('json') - def post(self): - # NOTE(sileht): we need to copy the dict because when change it - # and we don't want that next patch call have the "id" - schema = dict(schema_for(self._resource_type)) - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - schema["id"] = functools.partial(ResourceID, creator=creator) - - body = deserialize_and_validate(schema) - body["original_resource_id"], body["id"] = body["id"] - - target = { - "resource_type": self._resource_type, - } - target.update(body) - enforce("create resource", target) - rid = body['id'] - del body['id'] - try: - resource = pecan.request.indexer.create_resource( - self._resource_type, rid, creator, - **body) - except (ValueError, - indexer.NoSuchMetric, - indexer.NoSuchArchivePolicy) as e: - abort(400, e) - except indexer.ResourceAlreadyExists as e: - abort(409, 
e) - set_resp_location_hdr("/resource/" - + self._resource_type + "/" - + six.text_type(resource.id)) - etag_set_headers(resource) - pecan.response.status = 201 - return resource - - @pecan.expose('json') - def get_all(self, **kwargs): - details = get_details(kwargs) - history = get_history(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request.headers, "list resource", self._resource_type) - - try: - # FIXME(sileht): next API version should returns - # {'resources': [...], 'links': [ ... pagination rel ...]} - return pecan.request.indexer.list_resources( - self._resource_type, - attribute_filter=policy_filter, - details=details, - history=history, - **pagination_opts - ) - except indexer.IndexerException as e: - abort(400, e) - - @pecan.expose('json') - def delete(self, **kwargs): - # NOTE(sileht): Don't allow empty filter, this is going to delete - # the entire database. - attr_filter = deserialize_and_validate(ResourceSearchSchema) - - # the voluptuous checks everything, but it is better to - # have this here. - if not attr_filter: - abort(400, "caution: the query can not be empty, or it will \ - delete entire database") - - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( - pecan.request.headers, - "delete resources", self._resource_type) - - if policy_filter: - attr_filter = {"and": [policy_filter, attr_filter]} - - try: - delete_num = pecan.request.indexer.delete_resources( - self._resource_type, attribute_filter=attr_filter) - except indexer.IndexerException as e: - abort(400, e) - - return {"deleted": delete_num} - - -class ResourcesByTypeController(rest.RestController): - @pecan.expose('json') - def get_all(self): - return dict( - (rt.name, - pecan.request.application_url + '/resource/' + rt.name) - for rt in pecan.request.indexer.list_resource_types()) - - @pecan.expose() - def _lookup(self, resource_type, *remainder): - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return ResourcesController(resource_type), remainder - - -class InvalidQueryStringSearchAttrFilter(Exception): - def __init__(self, reason): - super(InvalidQueryStringSearchAttrFilter, self).__init__( - "Invalid filter: %s" % reason) - - -class QueryStringSearchAttrFilter(object): - uninary_operators = ("not", ) - binary_operator = (u">=", u"<=", u"!=", u">", u"<", u"=", u"==", u"eq", - u"ne", u"lt", u"gt", u"ge", u"le", u"in", u"like", u"≠", - u"≥", u"≤") - multiple_operators = (u"and", u"or", u"∧", u"∨") - - operator = pyparsing.Regex(u"|".join(binary_operator)) - null = pyparsing.Regex("None|none|null").setParseAction( - pyparsing.replaceWith(None)) - boolean = "False|True|false|true" - boolean = pyparsing.Regex(boolean).setParseAction( - lambda t: t[0].lower() == "true") - hex_string = lambda n: pyparsing.Word(pyparsing.hexnums, exact=n) - uuid_string = pyparsing.Combine( - hex_string(8) + (pyparsing.Optional("-") + hex_string(4)) * 3 + - pyparsing.Optional("-") + hex_string(12)) - number = r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?" 
- number = pyparsing.Regex(number).setParseAction(lambda t: float(t[0])) - identifier = pyparsing.Word(pyparsing.alphas, pyparsing.alphanums + "_") - quoted_string = pyparsing.QuotedString('"') | pyparsing.QuotedString("'") - comparison_term = pyparsing.Forward() - in_list = pyparsing.Group( - pyparsing.Suppress('[') + - pyparsing.Optional(pyparsing.delimitedList(comparison_term)) + - pyparsing.Suppress(']'))("list") - comparison_term << (null | boolean | uuid_string | identifier | number | - quoted_string | in_list) - condition = pyparsing.Group(comparison_term + operator + comparison_term) - - expr = pyparsing.infixNotation(condition, [ - ("not", 1, pyparsing.opAssoc.RIGHT, ), - ("and", 2, pyparsing.opAssoc.LEFT, ), - ("∧", 2, pyparsing.opAssoc.LEFT, ), - ("or", 2, pyparsing.opAssoc.LEFT, ), - ("∨", 2, pyparsing.opAssoc.LEFT, ), - ]) - - @classmethod - def _parsed_query2dict(cls, parsed_query): - result = None - while parsed_query: - part = parsed_query.pop() - if part in cls.binary_operator: - result = {part: {parsed_query.pop(): result}} - - elif part in cls.multiple_operators: - if result.get(part): - result[part].append( - cls._parsed_query2dict(parsed_query.pop())) - else: - result = {part: [result]} - - elif part in cls.uninary_operators: - result = {part: result} - elif isinstance(part, pyparsing.ParseResults): - kind = part.getName() - if kind == "list": - res = part.asList() - else: - res = cls._parsed_query2dict(part) - if result is None: - result = res - elif isinstance(result, dict): - list(result.values())[0].append(res) - else: - result = part - return result - - @classmethod - def parse(cls, query): - try: - parsed_query = cls.expr.parseString(query, parseAll=True)[0] - except pyparsing.ParseException as e: - raise InvalidQueryStringSearchAttrFilter(six.text_type(e)) - return cls._parsed_query2dict(parsed_query) - - -def ResourceSearchSchema(v): - return _ResourceSearchSchema()(v) - - -def _ResourceSearchSchema(): - user = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - _ResourceUUID = functools.partial(ResourceUUID, creator=user) - - return voluptuous.Schema( - voluptuous.All( - voluptuous.Length(min=0, max=1), - { - voluptuous.Any( - u"=", u"==", u"eq", - u"<", u"lt", - u">", u"gt", - u"<=", u"≤", u"le", - u">=", u"≥", u"ge", - u"!=", u"≠", u"ne", - u"in", - u"like", - ): voluptuous.All( - voluptuous.Length(min=1, max=1), - voluptuous.Any( - {"id": voluptuous.Any( - [_ResourceUUID], _ResourceUUID), - voluptuous.Extra: voluptuous.Extra})), - voluptuous.Any( - u"and", u"∨", - u"or", u"∧", - u"not", - ): voluptuous.All( - [ResourceSearchSchema], voluptuous.Length(min=1) - ) - } - ) - ) - - -class SearchResourceTypeController(rest.RestController): - def __init__(self, resource_type): - self._resource_type = resource_type - - @staticmethod - def parse_and_validate_qs_filter(query): - try: - attr_filter = QueryStringSearchAttrFilter.parse(query) - except InvalidQueryStringSearchAttrFilter as e: - raise abort(400, e) - return voluptuous.Schema(ResourceSearchSchema, - required=True)(attr_filter) - - def _search(self, **kwargs): - if pecan.request.body: - attr_filter = deserialize_and_validate(ResourceSearchSchema) - elif kwargs.get("filter"): - attr_filter = self.parse_and_validate_qs_filter(kwargs["filter"]) - else: - attr_filter = None - - details = get_details(kwargs) - history = get_history(kwargs) - pagination_opts = get_pagination_options( - kwargs, RESOURCE_DEFAULT_PAGINATION) - - policy_filter = pecan.request.auth_helper.get_resource_policy_filter( 
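The grammar above turns a query-string filter into the same nested dictionary that the JSON search body uses, so both entry points feed one code path. For instance (note that _parsed_query2dict builds the operand list from right to left):

    QueryStringSearchAttrFilter.parse(
        "type = instance and started_at >= '2017-01-01'")
    # -> {'and': [{'>=': {'started_at': '2017-01-01'}},
    #             {'=': {'type': 'instance'}}]}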
- pecan.request.headers, "search resource", self._resource_type) - if policy_filter: - if attr_filter: - attr_filter = {"and": [ - policy_filter, - attr_filter - ]} - else: - attr_filter = policy_filter - - return pecan.request.indexer.list_resources( - self._resource_type, - attribute_filter=attr_filter, - details=details, - history=history, - **pagination_opts) - - @pecan.expose('json') - def post(self, **kwargs): - try: - return self._search(**kwargs) - except indexer.IndexerException as e: - abort(400, e) - - -class SearchResourceController(rest.RestController): - @pecan.expose() - def _lookup(self, resource_type, *remainder): - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return SearchResourceTypeController(resource_type), remainder - - -def _MetricSearchSchema(v): - """Helper method to indirect the recursivity of the search schema""" - return SearchMetricController.MetricSearchSchema(v) - - -def _MetricSearchOperationSchema(v): - """Helper method to indirect the recursivity of the search schema""" - return SearchMetricController.MetricSearchOperationSchema(v) - - -class SearchMetricController(rest.RestController): - - MetricSearchOperationSchema = voluptuous.Schema( - voluptuous.All( - voluptuous.Length(min=1, max=1), - { - voluptuous.Any( - u"=", u"==", u"eq", - u"<", u"lt", - u">", u"gt", - u"<=", u"≤", u"le", - u">=", u"≥", u"ge", - u"!=", u"≠", u"ne", - u"%", u"mod", - u"+", u"add", - u"-", u"sub", - u"*", u"×", u"mul", - u"/", u"÷", u"div", - u"**", u"^", u"pow", - ): voluptuous.Any( - float, int, - voluptuous.All( - [float, int, - voluptuous.Any(_MetricSearchOperationSchema)], - voluptuous.Length(min=2, max=2), - ), - ), - }, - ) - ) - - MetricSearchSchema = voluptuous.Schema( - voluptuous.Any( - MetricSearchOperationSchema, - voluptuous.All( - voluptuous.Length(min=1, max=1), - { - voluptuous.Any( - u"and", u"∨", - u"or", u"∧", - u"not", - ): [_MetricSearchSchema], - } - ) - ) - ) - - @pecan.expose('json') - def post(self, metric_id, start=None, stop=None, aggregation='mean', - granularity=None): - granularity = [Timespan(g) - for g in arg_to_list(granularity or [])] - metrics = pecan.request.indexer.list_metrics( - ids=arg_to_list(metric_id)) - - for metric in metrics: - enforce("search metric", metric) - - if not pecan.request.body: - abort(400, "No query specified in body") - - query = deserialize_and_validate(self.MetricSearchSchema) - - if start is not None: - try: - start = utils.to_datetime(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_datetime(stop) - except Exception: - abort(400, "Invalid value for stop") - - try: - return { - str(metric.id): values - for metric, values in six.iteritems( - pecan.request.storage.search_value( - metrics, query, start, stop, aggregation, - granularity - ) - ) - } - except storage.InvalidQuery as e: - abort(400, e) - except storage.GranularityDoesNotExist as e: - abort(400, e) - - -class ResourcesMetricsMeasuresBatchController(rest.RestController): - @pecan.expose('json') - def post(self, create_metrics=False): - creator = pecan.request.auth_helper.get_current_user( - pecan.request.headers) - MeasuresBatchSchema = voluptuous.Schema( - {functools.partial(ResourceID, creator=creator): - {six.text_type: MeasuresListSchema}} - ) - - body = deserialize_and_validate(MeasuresBatchSchema) - - known_metrics = [] - unknown_metrics = [] - unknown_resources = [] - body_by_rid = {} - for 
original_resource_id, resource_id in body: - body_by_rid[resource_id] = body[(original_resource_id, - resource_id)] - names = body[(original_resource_id, resource_id)].keys() - metrics = pecan.request.indexer.list_metrics( - names=names, resource_id=resource_id) - - known_names = [m.name for m in metrics] - if strtobool("create_metrics", create_metrics): - already_exists_names = [] - for name in names: - if name not in known_names: - metric = MetricsController.MetricSchema({ - "name": name - }) - try: - m = pecan.request.indexer.create_metric( - uuid.uuid4(), - creator=creator, - resource_id=resource_id, - name=metric.get('name'), - unit=metric.get('unit'), - archive_policy_name=metric[ - 'archive_policy_name']) - except indexer.NamedMetricAlreadyExists as e: - already_exists_names.append(e.metric) - except indexer.NoSuchResource: - unknown_resources.append({ - 'resource_id': six.text_type(resource_id), - 'original_resource_id': original_resource_id}) - break - except indexer.IndexerException as e: - # This catch NoSuchArchivePolicy, which is unlikely - # be still possible - abort(400, e) - else: - known_metrics.append(m) - - if already_exists_names: - # Add metrics created in the meantime - known_names.extend(already_exists_names) - known_metrics.extend( - pecan.request.indexer.list_metrics( - names=already_exists_names, - resource_id=resource_id) - ) - - elif len(names) != len(metrics): - unknown_metrics.extend( - ["%s/%s" % (six.text_type(resource_id), m) - for m in names if m not in known_names]) - - known_metrics.extend(metrics) - - if unknown_resources: - abort(400, {"cause": "Unknown resources", - "detail": unknown_resources}) - - if unknown_metrics: - abort(400, "Unknown metrics: %s" % ", ".join( - sorted(unknown_metrics))) - - for metric in known_metrics: - enforce("post measures", metric) - - pecan.request.storage.incoming.add_measures_batch( - dict((metric, - body_by_rid[metric.resource_id][metric.name]) - for metric in known_metrics)) - - pecan.response.status = 202 - - -class MetricsMeasuresBatchController(rest.RestController): - # NOTE(sileht): we don't allow to mix both formats - # to not have to deal with id collision that can - # occurs between a metric_id and a resource_id. - # Because while json allow duplicate keys in dict payload - # only the last key will be retain by json python module to - # build the python dict. 
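Concretely, the two batch endpoints accept these payload shapes (identifiers illustrative; the URL paths come from the Batch* controllers further down):

    # POST /v1/batch/resources/metrics/measures — keyed by resource id,
    # then by metric name
    by_resource = {
        "75c44741-cc60-4033-804e-2d3098c7d2e9": {
            "cpu.util": [{"timestamp": "2017-06-05T17:04:30", "value": 23.1}],
        },
    }

    # POST /v1/batch/metrics/measures — keyed directly by metric UUID
    by_metric = {
        "8db7e824-5d63-4754-b7a6-7af6a1f0f6a8": [
            {"timestamp": "2017-06-05T17:04:30", "value": 23.1},
        ],
    }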
- MeasuresBatchSchema = voluptuous.Schema( - {utils.UUID: MeasuresListSchema} - ) - - @pecan.expose() - def post(self): - body = deserialize_and_validate(self.MeasuresBatchSchema) - metrics = pecan.request.indexer.list_metrics(ids=body.keys()) - - if len(metrics) != len(body): - missing_metrics = sorted(set(body) - set(m.id for m in metrics)) - abort(400, "Unknown metrics: %s" % ", ".join( - six.moves.map(str, missing_metrics))) - - for metric in metrics: - enforce("post measures", metric) - - pecan.request.storage.incoming.add_measures_batch( - dict((metric, body[metric.id]) for metric in - metrics)) - - pecan.response.status = 202 - - -class SearchController(object): - resource = SearchResourceController() - metric = SearchMetricController() - - -class AggregationResourceController(rest.RestController): - def __init__(self, resource_type, metric_name): - self.resource_type = resource_type - self.metric_name = metric_name - - @pecan.expose('json') - def post(self, start=None, stop=None, aggregation='mean', - reaggregation=None, granularity=None, needed_overlap=100.0, - groupby=None, fill=None, refresh=False, resample=None): - # First, set groupby in the right format: a sorted list of unique - # strings. - groupby = sorted(set(arg_to_list(groupby))) - - # NOTE(jd) Sort by groupby so we are sure we do not return multiple - # groups when using itertools.groupby later. - try: - resources = SearchResourceTypeController( - self.resource_type)._search(sort=groupby) - except indexer.InvalidPagination: - abort(400, "Invalid groupby attribute") - except indexer.IndexerException as e: - abort(400, e) - - if resources is None: - return [] - - if not groupby: - metrics = list(filter(None, - (r.get_metric(self.metric_name) - for r in resources))) - return AggregationController.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) - - def groupper(r): - return tuple((attr, r[attr]) for attr in groupby) - - results = [] - for key, resources in itertools.groupby(resources, groupper): - metrics = list(filter(None, - (r.get_metric(self.metric_name) - for r in resources))) - results.append({ - "group": dict(key), - "measures": AggregationController.get_cross_metric_measures_from_objs( # noqa - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) - }) - - return results - - -class AggregationController(rest.RestController): - _custom_actions = { - 'metric': ['GET'], - } - - @pecan.expose() - def _lookup(self, object_type, resource_type, key, metric_name, - *remainder): - if object_type != "resource" or key != "metric": - # NOTE(sileht): we want the raw 404 message here - # so use directly pecan - pecan.abort(404) - try: - pecan.request.indexer.get_resource_type(resource_type) - except indexer.NoSuchResourceType as e: - abort(404, e) - return AggregationResourceController(resource_type, - metric_name), remainder - - @staticmethod - def get_cross_metric_measures_from_objs(metrics, start=None, stop=None, - aggregation='mean', - reaggregation=None, - granularity=None, - needed_overlap=100.0, fill=None, - refresh=False, resample=None): - try: - needed_overlap = float(needed_overlap) - except ValueError: - abort(400, 'needed_overlap must be a number') - - if start is not None: - try: - start = utils.to_datetime(start) - except Exception: - abort(400, "Invalid value for start") - - if stop is not None: - try: - stop = utils.to_datetime(stop) - except Exception: - abort(400, 
"Invalid value for stop") - - if (aggregation - not in archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS): - abort( - 400, - 'Invalid aggregation value %s, must be one of %s' - % (aggregation, - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS)) - - for metric in metrics: - enforce("get metric", metric) - - number_of_metrics = len(metrics) - if number_of_metrics == 0: - return [] - if granularity is not None: - try: - granularity = Timespan(granularity) - except ValueError as e: - abort(400, e) - - if resample: - if not granularity: - abort(400, 'A granularity must be specified to resample') - try: - resample = Timespan(resample) - except ValueError as e: - abort(400, e) - - if fill is not None: - if granularity is None: - abort(400, "Unable to fill without a granularity") - try: - fill = float(fill) - except ValueError as e: - if fill != 'null': - abort(400, "fill must be a float or \'null\': %s" % e) - - try: - if strtobool("refresh", refresh): - store = pecan.request.storage - metrics_to_update = [ - m for m in metrics if store.incoming.has_unprocessed(m)] - for m in metrics_to_update: - try: - pecan.request.storage.refresh_metric( - pecan.request.indexer, m, - pecan.request.conf.api.refresh_timeout) - except storage.SackLockTimeoutError as e: - abort(503, e) - if number_of_metrics == 1: - # NOTE(sileht): don't do the aggregation if we only have one - # metric - measures = pecan.request.storage.get_measures( - metrics[0], start, stop, aggregation, - granularity, resample) - else: - measures = pecan.request.storage.get_cross_metric_measures( - metrics, start, stop, aggregation, - reaggregation, resample, granularity, needed_overlap, fill) - # Replace timestamp keys by their string versions - return [(timestamp.isoformat(), offset, v) - for timestamp, offset, v in measures] - except storage.MetricUnaggregatable as e: - abort(400, ("One of the metrics being aggregated doesn't have " - "matching granularity: %s") % str(e)) - except storage.MetricDoesNotExist as e: - abort(404, e) - except storage.AggregationDoesNotExist as e: - abort(404, e) - - @pecan.expose('json') - def get_metric(self, metric=None, start=None, stop=None, - aggregation='mean', reaggregation=None, granularity=None, - needed_overlap=100.0, fill=None, - refresh=False, resample=None): - # Check RBAC policy - metric_ids = arg_to_list(metric) - metrics = pecan.request.indexer.list_metrics(ids=metric_ids) - missing_metric_ids = (set(metric_ids) - - set(six.text_type(m.id) for m in metrics)) - if missing_metric_ids: - # Return one of the missing one in the error - abort(404, storage.MetricDoesNotExist( - missing_metric_ids.pop())) - return self.get_cross_metric_measures_from_objs( - metrics, start, stop, aggregation, reaggregation, - granularity, needed_overlap, fill, refresh, resample) - - -class CapabilityController(rest.RestController): - @staticmethod - @pecan.expose('json') - def get(): - aggregation_methods = set( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - return dict(aggregation_methods=aggregation_methods, - dynamic_aggregation_methods=[ - ext.name for ext in extension.ExtensionManager( - namespace='gnocchi.aggregates') - ]) - - -class StatusController(rest.RestController): - @staticmethod - @pecan.expose('json') - def get(details=True): - enforce("get status", {}) - try: - report = pecan.request.storage.incoming.measures_report( - strtobool("details", details)) - except incoming.ReportGenerationError: - abort(503, 'Unable to generate status. 
Please retry.') - report_dict = {"storage": {"summary": report['summary']}} - if 'details' in report: - report_dict["storage"]["measures_to_process"] = report['details'] - return report_dict - - -class MetricsBatchController(object): - measures = MetricsMeasuresBatchController() - - -class ResourcesMetricsBatchController(object): - measures = ResourcesMetricsMeasuresBatchController() - - -class ResourcesBatchController(object): - metrics = ResourcesMetricsBatchController() - - -class BatchController(object): - metrics = MetricsBatchController() - resources = ResourcesBatchController() - - -class V1Controller(object): - - def __init__(self): - self.sub_controllers = { - "search": SearchController(), - "archive_policy": ArchivePoliciesController(), - "archive_policy_rule": ArchivePolicyRulesController(), - "metric": MetricsController(), - "batch": BatchController(), - "resource": ResourcesByTypeController(), - "resource_type": ResourceTypesController(), - "aggregation": AggregationController(), - "capabilities": CapabilityController(), - "status": StatusController(), - } - for name, ctrl in self.sub_controllers.items(): - setattr(self, name, ctrl) - - @pecan.expose('json') - def index(self): - return { - "version": "1.0", - "links": [ - {"rel": "self", - "href": pecan.request.application_url} - ] + [ - {"rel": name, - "href": pecan.request.application_url + "/" + name} - for name in sorted(self.sub_controllers) - ] - } - - -class VersionsController(object): - @staticmethod - @pecan.expose('json') - def index(): - return { - "versions": [ - { - "status": "CURRENT", - "links": [ - { - "rel": "self", - "href": pecan.request.application_url + "/v1/" - } - ], - "id": "v1.0", - "updated": "2015-03-19" - } - ] - } diff --git a/gnocchi/rest/api-paste.ini b/gnocchi/rest/api-paste.ini deleted file mode 100644 index 47bb3c32d..000000000 --- a/gnocchi/rest/api-paste.ini +++ /dev/null @@ -1,46 +0,0 @@ -[composite:gnocchi+noauth] -use = egg:Paste#urlmap -/ = gnocchiversions_pipeline -/v1 = gnocchiv1+noauth -/healthcheck = healthcheck - -[composite:gnocchi+basic] -use = egg:Paste#urlmap -/ = gnocchiversions_pipeline -/v1 = gnocchiv1+noauth -/healthcheck = healthcheck - -[composite:gnocchi+keystone] -use = egg:Paste#urlmap -/ = gnocchiversions_pipeline -/v1 = gnocchiv1+keystone -/healthcheck = healthcheck - -[pipeline:gnocchiv1+noauth] -pipeline = http_proxy_to_wsgi gnocchiv1 - -[pipeline:gnocchiv1+keystone] -pipeline = http_proxy_to_wsgi keystone_authtoken gnocchiv1 - -[pipeline:gnocchiversions_pipeline] -pipeline = http_proxy_to_wsgi gnocchiversions - -[app:gnocchiversions] -paste.app_factory = gnocchi.rest.app:app_factory -root = gnocchi.rest.VersionsController - -[app:gnocchiv1] -paste.app_factory = gnocchi.rest.app:app_factory -root = gnocchi.rest.V1Controller - -[filter:keystone_authtoken] -use = egg:keystonemiddleware#auth_token -oslo_config_project = gnocchi - -[filter:http_proxy_to_wsgi] -use = egg:oslo.middleware#http_proxy_to_wsgi -oslo_config_project = gnocchi - -[app:healthcheck] -use = egg:oslo.middleware#healthcheck -oslo_config_project = gnocchi diff --git a/gnocchi/rest/app.py b/gnocchi/rest/app.py deleted file mode 100644 index 02022bd9f..000000000 --- a/gnocchi/rest/app.py +++ /dev/null @@ -1,143 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2016 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os -import pkg_resources -import uuid -import warnings - -from oslo_log import log -from oslo_middleware import cors -from oslo_policy import policy -from paste import deploy -import pecan -from pecan import jsonify -from stevedore import driver -import webob.exc - -from gnocchi import exceptions -from gnocchi import indexer as gnocchi_indexer -from gnocchi import json -from gnocchi import service -from gnocchi import storage as gnocchi_storage - - -LOG = log.getLogger(__name__) - - -# Register our encoder by default for everything -jsonify.jsonify.register(object)(json.to_primitive) - - -class GnocchiHook(pecan.hooks.PecanHook): - - def __init__(self, storage, indexer, conf): - self.storage = storage - self.indexer = indexer - self.conf = conf - self.policy_enforcer = policy.Enforcer(conf) - self.auth_helper = driver.DriverManager("gnocchi.rest.auth_helper", - conf.api.auth_mode, - invoke_on_load=True).driver - - def on_route(self, state): - state.request.storage = self.storage - state.request.indexer = self.indexer - state.request.conf = self.conf - state.request.policy_enforcer = self.policy_enforcer - state.request.auth_helper = self.auth_helper - - -class NotImplementedMiddleware(object): - def __init__(self, app): - self.app = app - - def __call__(self, environ, start_response): - try: - return self.app(environ, start_response) - except exceptions.NotImplementedError: - raise webob.exc.HTTPNotImplemented( - "Sorry, this Gnocchi server does " - "not implement this feature 😞") - -# NOTE(sileht): pastedeploy uses ConfigParser to handle -# global_conf, since python 3 ConfigParser doesn't -# allow to store object as config value, only strings are -# permit, so to be able to pass an object created before paste load -# the app, we store them into a global var. But the each loaded app -# store it's configuration in unique key to be concurrency safe. -global APPCONFIGS -APPCONFIGS = {} - - -def load_app(conf, indexer=None, storage=None, - not_implemented_middleware=True): - global APPCONFIGS - - # NOTE(sileht): We load config, storage and indexer, - # so all - if not storage: - storage = gnocchi_storage.get_driver(conf) - if not indexer: - indexer = gnocchi_indexer.get_driver(conf) - indexer.connect() - - # Build the WSGI app - cfg_path = conf.api.paste_config - if not os.path.isabs(cfg_path): - cfg_path = conf.find_file(cfg_path) - - if cfg_path is None or not os.path.exists(cfg_path): - LOG.debug("No api-paste configuration file found! 
Using default.") - cfg_path = pkg_resources.resource_filename(__name__, "api-paste.ini") - - config = dict(conf=conf, indexer=indexer, storage=storage, - not_implemented_middleware=not_implemented_middleware) - configkey = str(uuid.uuid4()) - APPCONFIGS[configkey] = config - - LOG.info("WSGI config used: %s", cfg_path) - - if conf.api.auth_mode == "noauth": - warnings.warn("The `noauth' authentication mode is deprecated", - category=DeprecationWarning) - - appname = "gnocchi+" + conf.api.auth_mode - app = deploy.loadapp("config:" + cfg_path, name=appname, - global_conf={'configkey': configkey}) - return cors.CORS(app, conf=conf) - - -def _setup_app(root, conf, indexer, storage, not_implemented_middleware): - app = pecan.make_app( - root, - hooks=(GnocchiHook(storage, indexer, conf),), - guess_content_type_from_ext=False, - ) - - if not_implemented_middleware: - app = webob.exc.HTTPExceptionMiddleware(NotImplementedMiddleware(app)) - - return app - - -def app_factory(global_config, **local_conf): - global APPCONFIGS - appconfig = APPCONFIGS.get(global_config.get('configkey')) - return _setup_app(root=local_conf.get('root'), **appconfig) - - -def build_wsgi_app(): - return load_app(service.prepare_service()) diff --git a/gnocchi/rest/app.wsgi b/gnocchi/rest/app.wsgi deleted file mode 100644 index 475d9acb1..000000000 --- a/gnocchi/rest/app.wsgi +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Use this file for deploying the API under mod_wsgi. - -See http://pecan.readthedocs.org/en/latest/deployment.html for details. -""" - -import debtcollector - -from gnocchi.rest import app - -application = app.build_wsgi_app() -debtcollector.deprecate(prefix="The wsgi script gnocchi/rest/app.wsgi is deprecated", - postfix=", please use gnocchi-api binary as wsgi script instead", - version="4.0", removal_version="4.1", - category=RuntimeWarning) diff --git a/gnocchi/rest/auth_helper.py b/gnocchi/rest/auth_helper.py deleted file mode 100644 index 46c0893cd..000000000 --- a/gnocchi/rest/auth_helper.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import webob -import werkzeug.http - -from gnocchi import rest - - -class KeystoneAuthHelper(object): - @staticmethod - def get_current_user(headers): - # FIXME(jd) should have domain but should not break existing :( - user_id = headers.get("X-User-Id", "") - project_id = headers.get("X-Project-Id", "") - return user_id + ":" + project_id - - @staticmethod - def get_auth_info(headers): - user_id = headers.get("X-User-Id") - project_id = headers.get("X-Project-Id") - return { - "user": (user_id or "") + ":" + (project_id or ""), - "user_id": user_id, - "project_id": project_id, - 'domain_id': headers.get("X-Domain-Id"), - 'roles': headers.get("X-Roles", "").split(","), - } - - @staticmethod - def get_resource_policy_filter(headers, rule, resource_type): - try: - # Check if the policy allows the user to list any resource - rest.enforce(rule, { - "resource_type": resource_type, - }) - except webob.exc.HTTPForbidden: - policy_filter = [] - project_id = headers.get("X-Project-Id") - - try: - # Check if the policy allows the user to list resources linked - # to their project - rest.enforce(rule, { - "resource_type": resource_type, - "project_id": project_id, - }) - except webob.exc.HTTPForbidden: - pass - else: - policy_filter.append({"=": {"project_id": project_id}}) - - try: - # Check if the policy allows the user to list resources linked - # to their created_by_project - rest.enforce(rule, { - "resource_type": resource_type, - "created_by_project_id": project_id, - }) - except webob.exc.HTTPForbidden: - pass - else: - if project_id: - policy_filter.append( - {"like": {"creator": "%:" + project_id}}) - else: - policy_filter.append({"=": {"creator": None}}) - - if not policy_filter: - # We need to have at least one policy filter in place - rest.abort(403, "Insufficient privileges") - - return {"or": policy_filter} - - -class NoAuthHelper(KeystoneAuthHelper): - @staticmethod - def get_current_user(headers): - # FIXME(jd) Should be a single header - user_id = headers.get("X-User-Id") - project_id = headers.get("X-Project-Id") - if user_id: - if project_id: - return user_id + ":" + project_id - return user_id - if project_id: - return project_id - rest.abort(401, "Unable to determine current user") - - -class BasicAuthHelper(object): - @staticmethod - def get_current_user(headers): - auth = werkzeug.http.parse_authorization_header( - headers.get("Authorization")) - if auth is None: - rest.abort(401) - return auth.username - - def get_auth_info(self, headers): - user = self.get_current_user(headers) - roles = [] - if user == "admin": - roles.append("admin") - return { - "user": user, - "roles": roles - } - - @staticmethod - def get_resource_policy_filter(headers, rule, resource_type): - return None diff --git a/gnocchi/rest/policy.json b/gnocchi/rest/policy.json deleted file mode 100644 index 51d396747..000000000 --- a/gnocchi/rest/policy.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "admin_or_creator": "role:admin or user:%(creator)s or project_id:%(created_by_project_id)s", - "resource_owner": "project_id:%(project_id)s", - "metric_owner": "project_id:%(resource.project_id)s", - - "get status": "role:admin", - - "create resource": "", - "get resource": "rule:admin_or_creator or rule:resource_owner", - "update resource": "rule:admin_or_creator", - "delete resource": "rule:admin_or_creator", - "delete resources": "rule:admin_or_creator", - "list resource": "rule:admin_or_creator or rule:resource_owner", - "search resource": "rule:admin_or_creator or rule:resource_owner", - - "create resource type": 
"role:admin", - "delete resource type": "role:admin", - "update resource type": "role:admin", - "list resource type": "", - "get resource type": "", - - "get archive policy": "", - "list archive policy": "", - "create archive policy": "role:admin", - "update archive policy": "role:admin", - "delete archive policy": "role:admin", - - "create archive policy rule": "role:admin", - "get archive policy rule": "", - "list archive policy rule": "", - "delete archive policy rule": "role:admin", - - "create metric": "", - "delete metric": "rule:admin_or_creator", - "get metric": "rule:admin_or_creator or rule:metric_owner", - "search metric": "rule:admin_or_creator or rule:metric_owner", - "list metric": "", - "list all metric": "role:admin", - - "get measures": "rule:admin_or_creator or rule:metric_owner", - "post measures": "rule:admin_or_creator" -} diff --git a/gnocchi/service.py b/gnocchi/service.py deleted file mode 100644 index 26b8e7dda..000000000 --- a/gnocchi/service.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2016-2017 Red Hat, Inc. -# Copyright (c) 2015 eNovance -# Copyright (c) 2013 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os - -from oslo_config import cfg -from oslo_db import options as db_options -from oslo_log import log -from oslo_policy import opts as policy_opts -import pbr.version -from six.moves.urllib import parse as urlparse - -from gnocchi import archive_policy -from gnocchi import opts -from gnocchi import utils - -LOG = log.getLogger(__name__) - - -def prepare_service(args=None, conf=None, - default_config_files=None): - if conf is None: - conf = cfg.ConfigOpts() - opts.set_defaults() - # FIXME(jd) Use the pkg_entry info to register the options of these libs - log.register_options(conf) - db_options.set_defaults(conf) - policy_opts.set_defaults(conf) - - # Register our own Gnocchi options - for group, options in opts.list_opts(): - conf.register_opts(list(options), - group=None if group == "DEFAULT" else group) - - conf.set_default("workers", utils.get_default_workers(), group="metricd") - - conf(args, project='gnocchi', validate_default_values=True, - default_config_files=default_config_files, - version=pbr.version.VersionInfo('gnocchi').version_string()) - - # HACK(jd) I'm not happy about that, fix AP class to handle a conf object? 
- archive_policy.ArchivePolicy.DEFAULT_AGGREGATION_METHODS = ( - conf.archive_policy.default_aggregation_methods - ) - - # If no coordination URL is provided, default to using the indexer as - # coordinator - if conf.storage.coordination_url is None: - if conf.storage.driver == "redis": - conf.set_default("coordination_url", - conf.storage.redis_url, - "storage") - elif conf.incoming.driver == "redis": - conf.set_default("coordination_url", - conf.incoming.redis_url, - "storage") - else: - parsed = urlparse.urlparse(conf.indexer.url) - proto, _, _ = parsed.scheme.partition("+") - parsed = list(parsed) - # Set proto without the + part - parsed[0] = proto - conf.set_default("coordination_url", - urlparse.urlunparse(parsed), - "storage") - - cfg_path = conf.oslo_policy.policy_file - if not os.path.isabs(cfg_path): - cfg_path = conf.find_file(cfg_path) - if cfg_path is None or not os.path.exists(cfg_path): - cfg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), - 'rest', 'policy.json')) - conf.set_default('policy_file', cfg_path, group='oslo_policy') - - log.set_defaults(default_log_levels=log.get_default_log_levels() + - ["passlib.utils.compat=INFO"]) - log.setup(conf, 'gnocchi') - conf.log_opt_values(LOG, log.DEBUG) - - return conf diff --git a/gnocchi/statsd.py b/gnocchi/statsd.py deleted file mode 100644 index 267df4978..000000000 --- a/gnocchi/statsd.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
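# Editorial sketch (not part of the original patch): how an entry point might
# invoke the prepare_service() helper above; the argv handling is an
# assumption, the option names follow the code shown.
import sys

from gnocchi import service

conf = service.prepare_service(sys.argv[1:])
# After prepare_service() returns, coordination_url has been defaulted to the
# redis or indexer URL as implemented above.
print(conf.storage.coordination_url)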
-import itertools -import uuid - -try: - import asyncio -except ImportError: - import trollius as asyncio -from oslo_config import cfg -from oslo_log import log -import six - -from gnocchi import indexer -from gnocchi import service -from gnocchi import storage -from gnocchi import utils - - -LOG = log.getLogger(__name__) - - -class Stats(object): - def __init__(self, conf): - self.conf = conf - self.storage = storage.get_driver(self.conf) - self.indexer = indexer.get_driver(self.conf) - self.indexer.connect() - try: - self.indexer.create_resource('generic', - self.conf.statsd.resource_id, - self.conf.statsd.creator) - except indexer.ResourceAlreadyExists: - LOG.debug("Resource %s already exists", - self.conf.statsd.resource_id) - else: - LOG.info("Created resource %s", self.conf.statsd.resource_id) - self.gauges = {} - self.counters = {} - self.times = {} - - def reset(self): - self.gauges.clear() - self.counters.clear() - self.times.clear() - - def treat_metric(self, metric_name, metric_type, value, sampling): - metric_name += "|" + metric_type - if metric_type == "ms": - if sampling is not None: - raise ValueError( - "Invalid sampling for ms: `%d`, should be none" - % sampling) - self.times[metric_name] = storage.Measure( - utils.dt_in_unix_ns(utils.utcnow()), value) - elif metric_type == "g": - if sampling is not None: - raise ValueError( - "Invalid sampling for g: `%d`, should be none" - % sampling) - self.gauges[metric_name] = storage.Measure( - utils.dt_in_unix_ns(utils.utcnow()), value) - elif metric_type == "c": - sampling = 1 if sampling is None else sampling - if metric_name in self.counters: - current_value = self.counters[metric_name].value - else: - current_value = 0 - self.counters[metric_name] = storage.Measure( - utils.dt_in_unix_ns(utils.utcnow()), - current_value + (value * (1 / sampling))) - # TODO(jd) Support "set" type - # elif metric_type == "s": - # pass - else: - raise ValueError("Unknown metric type `%s'" % metric_type) - - def flush(self): - resource = self.indexer.get_resource('generic', - self.conf.statsd.resource_id, - with_metrics=True) - - for metric_name, measure in itertools.chain( - six.iteritems(self.gauges), - six.iteritems(self.counters), - six.iteritems(self.times)): - try: - # NOTE(jd) We avoid considering any concurrency here as statsd - # is not designed to run in parallel and we do not envision - # operators manipulating the resource/metrics using the Gnocchi - # API at the same time. 
- metric = resource.get_metric(metric_name) - if not metric: - ap_name = self._get_archive_policy_name(metric_name) - metric = self.indexer.create_metric( - uuid.uuid4(), - self.conf.statsd.creator, - archive_policy_name=ap_name, - name=metric_name, - resource_id=self.conf.statsd.resource_id) - self.storage.incoming.add_measures(metric, (measure,)) - except Exception as e: - LOG.error("Unable to add measure %s: %s", - metric_name, e) - - self.reset() - - def _get_archive_policy_name(self, metric_name): - if self.conf.statsd.archive_policy_name: - return self.conf.statsd.archive_policy_name - # NOTE(sileht): We didn't catch NoArchivePolicyRuleMatch to log it - ap = self.indexer.get_archive_policy_for_metric(metric_name) - return ap.name - - -class StatsdServer(object): - def __init__(self, stats): - self.stats = stats - - @staticmethod - def connection_made(transport): - pass - - def datagram_received(self, data, addr): - LOG.debug("Received data `%r' from %s", data, addr) - try: - messages = [m for m in data.decode().split("\n") if m] - except Exception as e: - LOG.error("Unable to decode datagram: %s", e) - return - for message in messages: - metric = message.split("|") - if len(metric) == 2: - metric_name, metric_type = metric - sampling = None - elif len(metric) == 3: - metric_name, metric_type, sampling = metric - else: - LOG.error("Invalid number of | in `%s'", message) - continue - sampling = float(sampling[1:]) if sampling is not None else None - metric_name, metric_str_val = metric_name.split(':') - # NOTE(jd): We do not support +/- gauge, and we delete gauge on - # each flush. - value = float(metric_str_val) - try: - self.stats.treat_metric(metric_name, metric_type, - value, sampling) - except Exception as e: - LOG.error("Unable to treat metric %s: %s", message, str(e)) - - -def start(): - conf = service.prepare_service() - - if conf.statsd.resource_id is None: - raise cfg.RequiredOptError("resource_id", cfg.OptGroup("statsd")) - - stats = Stats(conf) - - loop = asyncio.get_event_loop() - # TODO(jd) Add TCP support - listen = loop.create_datagram_endpoint( - lambda: StatsdServer(stats), - local_addr=(conf.statsd.host, conf.statsd.port)) - - def _flush(): - loop.call_later(conf.statsd.flush_delay, _flush) - stats.flush() - - loop.call_later(conf.statsd.flush_delay, _flush) - transport, protocol = loop.run_until_complete(listen) - - LOG.info("Started on %s:%d", conf.statsd.host, conf.statsd.port) - LOG.info("Flush delay: %d seconds", conf.statsd.flush_delay) - - try: - loop.run_forever() - except KeyboardInterrupt: - pass - - transport.close() - loop.close() diff --git a/gnocchi/storage/__init__.py b/gnocchi/storage/__init__.py deleted file mode 100644 index d06a47cff..000000000 --- a/gnocchi/storage/__init__.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
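# Editorial sketch (not part of the original patch): the datagram format
# accepted by StatsdServer.datagram_received() above is
# "<name>:<value>|<type>[|@<rate>]"; the host and port are assumptions.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"cpu_util:42.5|g", ("127.0.0.1", 8125))      # gauge
sock.sendto(b"api.hits:1|c|@0.5", ("127.0.0.1", 8125))    # counter, 0.5 sampling
sock.sendto(b"request_time:250|ms", ("127.0.0.1", 8125))  # timer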
-import operator -from oslo_config import cfg -from oslo_log import log -from stevedore import driver - -from gnocchi import exceptions -from gnocchi import indexer - - -OPTS = [ - cfg.StrOpt('driver', - default='file', - help='Storage driver to use'), -] - -LOG = log.getLogger(__name__) - - -class Measure(object): - def __init__(self, timestamp, value): - self.timestamp = timestamp - self.value = value - - def __iter__(self): - """Allow to transform measure to tuple.""" - yield self.timestamp - yield self.value - - -class Metric(object): - def __init__(self, id, archive_policy, - creator=None, - name=None, - resource_id=None): - self.id = id - self.archive_policy = archive_policy - self.creator = creator - self.name = name - self.resource_id = resource_id - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, self.id) - - def __str__(self): - return str(self.id) - - def __eq__(self, other): - return (isinstance(other, Metric) - and self.id == other.id - and self.archive_policy == other.archive_policy - and self.creator == other.creator - and self.name == other.name - and self.resource_id == other.resource_id) - - __hash__ = object.__hash__ - - -class StorageError(Exception): - pass - - -class InvalidQuery(StorageError): - pass - - -class MetricDoesNotExist(StorageError): - """Error raised when this metric does not exist.""" - - def __init__(self, metric): - self.metric = metric - super(MetricDoesNotExist, self).__init__( - "Metric %s does not exist" % metric) - - -class AggregationDoesNotExist(StorageError): - """Error raised when the aggregation method doesn't exists for a metric.""" - - def __init__(self, metric, method): - self.metric = metric - self.method = method - super(AggregationDoesNotExist, self).__init__( - "Aggregation method '%s' for metric %s does not exist" % - (method, metric)) - - -class GranularityDoesNotExist(StorageError): - """Error raised when the granularity doesn't exist for a metric.""" - - def __init__(self, metric, granularity): - self.metric = metric - self.granularity = granularity - super(GranularityDoesNotExist, self).__init__( - "Granularity '%s' for metric %s does not exist" % - (granularity, metric)) - - -class MetricAlreadyExists(StorageError): - """Error raised when this metric already exists.""" - - def __init__(self, metric): - self.metric = metric - super(MetricAlreadyExists, self).__init__( - "Metric %s already exists" % metric) - - -class MetricUnaggregatable(StorageError): - """Error raised when metrics can't be aggregated.""" - - def __init__(self, metrics, reason): - self.metrics = metrics - self.reason = reason - super(MetricUnaggregatable, self).__init__( - "Metrics %s can't be aggregated: %s" - % (", ".join((str(m.id) for m in metrics)), reason)) - - -class LockedMetric(StorageError): - """Error raised when this metric is already being handled by another.""" - - def __init__(self, metric): - self.metric = metric - super(LockedMetric, self).__init__("Metric %s is locked" % metric) - - -def get_driver_class(namespace, conf): - """Return the storage driver class. - - :param conf: The conf to use to determine the driver. 
- """ - return driver.DriverManager(namespace, - conf.driver).driver - - -def get_driver(conf): - """Return the configured driver.""" - incoming = get_driver_class('gnocchi.incoming', conf.incoming)( - conf.incoming) - return get_driver_class('gnocchi.storage', conf.storage)( - conf.storage, incoming) - - -class StorageDriver(object): - def __init__(self, conf, incoming): - self.incoming = incoming - - @staticmethod - def stop(): - pass - - def upgrade(self, index, num_sacks): - self.incoming.upgrade(index, num_sacks) - - def process_background_tasks(self, index, metrics, sync=False): - """Process background tasks for this storage. - - This calls :func:`process_new_measures` to process new measures - - :param index: An indexer to be used for querying metrics - :param metrics: The list of metrics waiting for processing - :param sync: If True, then process everything synchronously and raise - on error - :type sync: bool - """ - LOG.debug("Processing new measures") - try: - self.process_new_measures(index, metrics, sync) - except Exception: - if sync: - raise - LOG.error("Unexpected error during measures processing", - exc_info=True) - - def expunge_metrics(self, index, sync=False): - """Remove deleted metrics - - :param index: An indexer to be used for querying metrics - :param sync: If True, then delete everything synchronously and raise - on error - :type sync: bool - """ - - metrics_to_expunge = index.list_metrics(status='delete') - for m in metrics_to_expunge: - try: - self.delete_metric(m, sync) - index.expunge_metric(m.id) - except (indexer.NoSuchMetric, LockedMetric): - # It's possible another process deleted or is deleting the - # metric, not a big deal - pass - except Exception: - if sync: - raise - LOG.error("Unable to expunge metric %s from storage", m, - exc_info=True) - - @staticmethod - def process_new_measures(indexer, metrics, sync=False): - """Process added measures in background. - - Some drivers might need to have a background task running that process - the measures sent to metrics. This is used for that. - """ - - @staticmethod - def get_measures(metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None): - """Get a measure to a metric. - - :param metric: The metric measured. - :param from timestamp: The timestamp to get the measure from. - :param to timestamp: The timestamp to get the measure to. - :param aggregation: The type of aggregation to retrieve. - :param granularity: The granularity to retrieve. - :param resample: The granularity to resample to. - """ - if aggregation not in metric.archive_policy.aggregation_methods: - raise AggregationDoesNotExist(metric, aggregation) - - @staticmethod - def delete_metric(metric, sync=False): - raise exceptions.NotImplementedError - - @staticmethod - def get_cross_metric_measures(metrics, from_timestamp=None, - to_timestamp=None, aggregation='mean', - reaggregation=None, resample=None, - granularity=None, needed_overlap=None, - fill=None): - """Get aggregated measures of multiple entities. - - :param entities: The entities measured to aggregate. - :param from timestamp: The timestamp to get the measure from. - :param to timestamp: The timestamp to get the measure to. - :param granularity: The granularity to retrieve. - :param aggregation: The type of aggregation to retrieve. - :param reaggregation: The type of aggregation to compute - on the retrieved measures. - :param resample: The granularity to resample to. - :param fill: The value to use to fill in missing data in series. 
- """ - for metric in metrics: - if aggregation not in metric.archive_policy.aggregation_methods: - raise AggregationDoesNotExist(metric, aggregation) - if (granularity is not None and granularity - not in set(d.granularity - for d in metric.archive_policy.definition)): - raise GranularityDoesNotExist(metric, granularity) - - @staticmethod - def search_value(metrics, query, from_timestamp=None, - to_timestamp=None, - aggregation='mean', - granularity=None): - """Search for an aggregated value that realizes a predicate. - - :param metrics: The list of metrics to look into. - :param query: The query being sent. - :param from_timestamp: The timestamp to get the measure from. - :param to_timestamp: The timestamp to get the measure to. - :param aggregation: The type of aggregation to retrieve. - :param granularity: The granularity to retrieve. - """ - raise exceptions.NotImplementedError - - -class MeasureQuery(object): - binary_operators = { - u"=": operator.eq, - u"==": operator.eq, - u"eq": operator.eq, - - u"<": operator.lt, - u"lt": operator.lt, - - u">": operator.gt, - u"gt": operator.gt, - - u"<=": operator.le, - u"≤": operator.le, - u"le": operator.le, - - u">=": operator.ge, - u"≥": operator.ge, - u"ge": operator.ge, - - u"!=": operator.ne, - u"≠": operator.ne, - u"ne": operator.ne, - - u"%": operator.mod, - u"mod": operator.mod, - - u"+": operator.add, - u"add": operator.add, - - u"-": operator.sub, - u"sub": operator.sub, - - u"*": operator.mul, - u"×": operator.mul, - u"mul": operator.mul, - - u"/": operator.truediv, - u"÷": operator.truediv, - u"div": operator.truediv, - - u"**": operator.pow, - u"^": operator.pow, - u"pow": operator.pow, - } - - multiple_operators = { - u"or": any, - u"∨": any, - u"and": all, - u"∧": all, - } - - def __init__(self, tree): - self._eval = self.build_evaluator(tree) - - def __call__(self, value): - return self._eval(value) - - def build_evaluator(self, tree): - try: - operator, nodes = list(tree.items())[0] - except Exception: - return lambda value: tree - try: - op = self.multiple_operators[operator] - except KeyError: - try: - op = self.binary_operators[operator] - except KeyError: - raise InvalidQuery("Unknown operator %s" % operator) - return self._handle_binary_op(op, nodes) - return self._handle_multiple_op(op, nodes) - - def _handle_multiple_op(self, op, nodes): - elements = [self.build_evaluator(node) for node in nodes] - return lambda value: op((e(value) for e in elements)) - - def _handle_binary_op(self, op, node): - try: - iterator = iter(node) - except Exception: - return lambda value: op(value, node) - nodes = list(iterator) - if len(nodes) != 2: - raise InvalidQuery( - "Binary operator %s needs 2 arguments, %d given" % - (op, len(nodes))) - node0 = self.build_evaluator(node[0]) - node1 = self.build_evaluator(node[1]) - return lambda value: op(node0(value), node1(value)) diff --git a/gnocchi/storage/_carbonara.py b/gnocchi/storage/_carbonara.py deleted file mode 100644 index 65983ad1c..000000000 --- a/gnocchi/storage/_carbonara.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import collections -import datetime -import itertools -import operator - -from concurrent import futures -import iso8601 -from oslo_config import cfg -from oslo_log import log -import six -import six.moves - -from gnocchi import carbonara -from gnocchi import storage -from gnocchi import utils - - -OPTS = [ - cfg.IntOpt('aggregation_workers_number', - default=1, min=1, - help='Number of threads to process and store aggregates. ' - 'Set value roughly equal to number of aggregates to be ' - 'computed per metric'), - cfg.StrOpt('coordination_url', - secret=True, - help='Coordination driver URL'), - -] - -LOG = log.getLogger(__name__) - - -class CorruptionError(ValueError): - """Data corrupted, damn it.""" - - def __init__(self, message): - super(CorruptionError, self).__init__(message) - - -class SackLockTimeoutError(Exception): - pass - - -class CarbonaraBasedStorage(storage.StorageDriver): - - def __init__(self, conf, incoming): - super(CarbonaraBasedStorage, self).__init__(conf, incoming) - self.aggregation_workers_number = conf.aggregation_workers_number - if self.aggregation_workers_number == 1: - # NOTE(jd) Avoid using futures at all if we don't want any threads. - self._map_in_thread = self._map_no_thread - else: - self._map_in_thread = self._map_in_futures_threads - self.coord, my_id = utils.get_coordinator_and_start( - conf.coordination_url) - - def stop(self): - self.coord.stop() - - @staticmethod - def _get_measures(metric, timestamp_key, aggregation, granularity, - version=3): - raise NotImplementedError - - @staticmethod - def _get_unaggregated_timeserie(metric, version=3): - raise NotImplementedError - - def _get_unaggregated_timeserie_and_unserialize( - self, metric, block_size, back_window): - """Retrieve unaggregated timeserie for a metric and unserialize it. - - Returns a gnocchi.carbonara.BoundTimeSerie object. If the data cannot - be retrieved, returns None. - - """ - with utils.StopWatch() as sw: - raw_measures = ( - self._get_unaggregated_timeserie( - metric) - ) - if not raw_measures: - return - LOG.debug( - "Retrieve unaggregated measures " - "for %s in %.2fs", - metric.id, sw.elapsed()) - try: - return carbonara.BoundTimeSerie.unserialize( - raw_measures, block_size, back_window) - except ValueError: - raise CorruptionError( - "Data corruption detected for %s " - "unaggregated timeserie" % metric.id) - - @staticmethod - def _store_unaggregated_timeserie(metric, data, version=3): - raise NotImplementedError - - @staticmethod - def _store_metric_measures(metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - raise NotImplementedError - - @staticmethod - def _list_split_keys_for_metric(metric, aggregation, granularity, - version=3): - raise NotImplementedError - - @staticmethod - def _version_check(name, v): - """Validate object matches expected version. 
- - Version should be last attribute and start with 'v' - """ - return name.split("_")[-1] == 'v%s' % v - - def get_measures(self, metric, from_timestamp=None, to_timestamp=None, - aggregation='mean', granularity=None, resample=None): - super(CarbonaraBasedStorage, self).get_measures( - metric, from_timestamp, to_timestamp, aggregation) - if granularity is None: - agg_timeseries = self._map_in_thread( - self._get_measures_timeserie, - ((metric, aggregation, ap.granularity, - from_timestamp, to_timestamp) - for ap in reversed(metric.archive_policy.definition))) - else: - agg_timeseries = self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp) - if resample: - agg_timeseries = agg_timeseries.resample(resample) - agg_timeseries = [agg_timeseries] - - return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for ts in agg_timeseries - for timestamp, r, v in ts.fetch(from_timestamp, to_timestamp)] - - def _get_measures_and_unserialize(self, metric, key, - aggregation, granularity): - data = self._get_measures(metric, key, aggregation, granularity) - try: - return carbonara.AggregatedTimeSerie.unserialize( - data, key, aggregation, granularity) - except carbonara.InvalidData: - LOG.error("Data corruption detected for %s " - "aggregated `%s' timeserie, granularity `%s' " - "around time `%s', ignoring.", - metric.id, aggregation, granularity, key) - - def _get_measures_timeserie(self, metric, - aggregation, granularity, - from_timestamp=None, to_timestamp=None): - - # Find the number of point - for d in metric.archive_policy.definition: - if d.granularity == granularity: - points = d.points - break - else: - raise storage.GranularityDoesNotExist(metric, granularity) - - all_keys = None - try: - all_keys = self._list_split_keys_for_metric( - metric, aggregation, granularity) - except storage.MetricDoesNotExist: - for d in metric.archive_policy.definition: - if d.granularity == granularity: - return carbonara.AggregatedTimeSerie( - sampling=granularity, - aggregation_method=aggregation, - max_size=d.points) - raise storage.GranularityDoesNotExist(metric, granularity) - - if from_timestamp: - from_timestamp = str( - carbonara.SplitKey.from_timestamp_and_sampling( - from_timestamp, granularity)) - - if to_timestamp: - to_timestamp = str( - carbonara.SplitKey.from_timestamp_and_sampling( - to_timestamp, granularity)) - - timeseries = filter( - lambda x: x is not None, - self._map_in_thread( - self._get_measures_and_unserialize, - ((metric, key, aggregation, granularity) - for key in all_keys - if ((not from_timestamp or key >= from_timestamp) - and (not to_timestamp or key <= to_timestamp)))) - ) - - return carbonara.AggregatedTimeSerie.from_timeseries( - sampling=granularity, - aggregation_method=aggregation, - timeseries=timeseries, - max_size=points) - - def _store_timeserie_split(self, metric, key, split, - aggregation, archive_policy_def, - oldest_mutable_timestamp): - # NOTE(jd) We write the full split only if the driver works that way - # (self.WRITE_FULL) or if the oldest_mutable_timestamp is out of range. 
- write_full = self.WRITE_FULL or next(key) <= oldest_mutable_timestamp - key_as_str = str(key) - if write_full: - try: - existing = self._get_measures_and_unserialize( - metric, key_as_str, aggregation, - archive_policy_def.granularity) - except storage.AggregationDoesNotExist: - pass - else: - if existing is not None: - if split is None: - split = existing - else: - split.merge(existing) - - if split is None: - # `split' can be none if existing is None and no split was passed - # in order to rewrite and compress the data; in that case, it means - # the split key is present and listed, but some aggregation method - # or granularity is missing. That means data is corrupted, but it - # does not mean we have to fail, we can just do nothing and log a - # warning. - LOG.warning("No data found for metric %s, granularity %f " - "and aggregation method %s (split key %s): " - "possible data corruption", - metric, archive_policy_def.granularity, - aggregation, key) - return - - offset, data = split.serialize(key, compressed=write_full) - - return self._store_metric_measures( - metric, key_as_str, aggregation, archive_policy_def.granularity, - data, offset=offset) - - def _add_measures(self, aggregation, archive_policy_def, - metric, grouped_serie, - previous_oldest_mutable_timestamp, - oldest_mutable_timestamp): - ts = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped_serie, archive_policy_def.granularity, - aggregation, max_size=archive_policy_def.points) - - # Don't do anything if the timeserie is empty - if not ts: - return - - # We only need to check for rewrite if driver is not in WRITE_FULL mode - # and if we already stored splits once - need_rewrite = ( - not self.WRITE_FULL - and previous_oldest_mutable_timestamp is not None - ) - - if archive_policy_def.timespan or need_rewrite: - existing_keys = self._list_split_keys_for_metric( - metric, aggregation, archive_policy_def.granularity) - - # First delete old splits - if archive_policy_def.timespan: - oldest_point_to_keep = ts.last - datetime.timedelta( - seconds=archive_policy_def.timespan) - oldest_key_to_keep = ts.get_split_key(oldest_point_to_keep) - oldest_key_to_keep_s = str(oldest_key_to_keep) - for key in list(existing_keys): - # NOTE(jd) Only delete if the key is strictly inferior to - # the timestamp; we don't delete any timeserie split that - # contains our timestamp, so we prefer to keep a bit more - # than deleting too much - if key < oldest_key_to_keep_s: - self._delete_metric_measures( - metric, key, aggregation, - archive_policy_def.granularity) - existing_keys.remove(key) - else: - oldest_key_to_keep = carbonara.SplitKey(0, 0) - - # Rewrite all read-only splits just for fun (and compression). This - # only happens if `previous_oldest_mutable_timestamp' exists, which - # means we already wrote some splits at some point – so this is not the - # first time we treat this timeserie. - if need_rewrite: - previous_oldest_mutable_key = str(ts.get_split_key( - previous_oldest_mutable_timestamp)) - oldest_mutable_key = str(ts.get_split_key( - oldest_mutable_timestamp)) - - if previous_oldest_mutable_key != oldest_mutable_key: - for key in existing_keys: - if previous_oldest_mutable_key <= key < oldest_mutable_key: - LOG.debug( - "Compressing previous split %s (%s) for metric %s", - key, aggregation, metric) - # NOTE(jd) Rewrite it entirely for fun (and later for - # compression). For that, we just pass None as split. 
- self._store_timeserie_split( - metric, carbonara.SplitKey( - float(key), archive_policy_def.granularity), - None, aggregation, archive_policy_def, - oldest_mutable_timestamp) - - for key, split in ts.split(): - if key >= oldest_key_to_keep: - LOG.debug( - "Storing split %s (%s) for metric %s", - key, aggregation, metric) - self._store_timeserie_split( - metric, key, split, aggregation, archive_policy_def, - oldest_mutable_timestamp) - - @staticmethod - def _delete_metric(metric): - raise NotImplementedError - - def delete_metric(self, metric, sync=False): - LOG.debug("Deleting metric %s", metric) - lock = self.incoming.get_sack_lock( - self.coord, self.incoming.sack_for_metric(metric.id)) - if not lock.acquire(blocking=sync): - raise storage.LockedMetric(metric) - # NOTE(gordc): no need to hold lock because the metric has been already - # marked as "deleted" in the indexer so no measure worker - # is going to process it anymore. - lock.release() - self._delete_metric(metric) - self.incoming.delete_unprocessed_measures_for_metric_id(metric.id) - - @staticmethod - def _delete_metric_measures(metric, timestamp_key, - aggregation, granularity, version=3): - raise NotImplementedError - - def refresh_metric(self, indexer, metric, timeout): - s = self.incoming.sack_for_metric(metric.id) - lock = self.incoming.get_sack_lock(self.coord, s) - if not lock.acquire(blocking=timeout): - raise SackLockTimeoutError( - 'Unable to refresh metric: %s. Metric is locked. ' - 'Please try again.' % metric.id) - try: - self.process_new_measures(indexer, [six.text_type(metric.id)]) - finally: - lock.release() - - def process_new_measures(self, indexer, metrics_to_process, - sync=False): - # process only active metrics. deleted metrics with unprocessed - # measures will be skipped until cleaned by janitor. - metrics = indexer.list_metrics(ids=metrics_to_process) - for metric in metrics: - # NOTE(gordc): must lock at sack level - try: - LOG.debug("Processing measures for %s", metric) - with self.incoming.process_measure_for_metric(metric) \ - as measures: - self._compute_and_store_timeseries(metric, measures) - LOG.debug("Measures for metric %s processed", metric) - except Exception: - if sync: - raise - LOG.error("Error processing new measures", exc_info=True) - - def _compute_and_store_timeseries(self, metric, measures): - # NOTE(mnaser): The metric could have been handled by - # another worker, ignore if no measures. 
- if len(measures) == 0: - LOG.debug("Skipping %s (already processed)", metric) - return - - measures = sorted(measures, key=operator.itemgetter(0)) - - agg_methods = list(metric.archive_policy.aggregation_methods) - block_size = metric.archive_policy.max_block_size - back_window = metric.archive_policy.back_window - definition = metric.archive_policy.definition - - try: - ts = self._get_unaggregated_timeserie_and_unserialize( - metric, block_size=block_size, back_window=back_window) - except storage.MetricDoesNotExist: - try: - self._create_metric(metric) - except storage.MetricAlreadyExists: - # Created in the mean time, do not worry - pass - ts = None - except CorruptionError as e: - LOG.error(e) - ts = None - - if ts is None: - # This is the first time we treat measures for this - # metric, or data are corrupted, create a new one - ts = carbonara.BoundTimeSerie(block_size=block_size, - back_window=back_window) - current_first_block_timestamp = None - else: - current_first_block_timestamp = ts.first_block_timestamp() - - # NOTE(jd) This is Python where you need such - # hack to pass a variable around a closure, - # sorry. - computed_points = {"number": 0} - - def _map_add_measures(bound_timeserie): - # NOTE (gordc): bound_timeserie is entire set of - # unaggregated measures matching largest - # granularity. the following takes only the points - # affected by new measures for specific granularity - tstamp = max(bound_timeserie.first, measures[0][0]) - new_first_block_timestamp = bound_timeserie.first_block_timestamp() - computed_points['number'] = len(bound_timeserie) - for d in definition: - ts = bound_timeserie.group_serie( - d.granularity, carbonara.round_timestamp( - tstamp, d.granularity * 10e8)) - - self._map_in_thread( - self._add_measures, - ((aggregation, d, metric, ts, - current_first_block_timestamp, - new_first_block_timestamp) - for aggregation in agg_methods)) - - with utils.StopWatch() as sw: - ts.set_values(measures, - before_truncate_callback=_map_add_measures, - ignore_too_old_timestamps=True) - - number_of_operations = (len(agg_methods) * len(definition)) - perf = "" - elapsed = sw.elapsed() - if elapsed > 0: - perf = " (%d points/s, %d measures/s)" % ( - ((number_of_operations * computed_points['number']) / - elapsed), - ((number_of_operations * len(measures)) / elapsed) - ) - LOG.debug("Computed new metric %s with %d new measures " - "in %.2f seconds%s", - metric.id, len(measures), elapsed, perf) - - self._store_unaggregated_timeserie(metric, ts.serialize()) - - def get_cross_metric_measures(self, metrics, from_timestamp=None, - to_timestamp=None, aggregation='mean', - reaggregation=None, resample=None, - granularity=None, needed_overlap=100.0, - fill=None): - super(CarbonaraBasedStorage, self).get_cross_metric_measures( - metrics, from_timestamp, to_timestamp, - aggregation, reaggregation, resample, granularity, needed_overlap) - - if reaggregation is None: - reaggregation = aggregation - - if granularity is None: - granularities = ( - definition.granularity - for metric in metrics - for definition in metric.archive_policy.definition - ) - granularities_in_common = [ - g - for g, occurrence in six.iteritems( - collections.Counter(granularities)) - if occurrence == len(metrics) - ] - - if not granularities_in_common: - raise storage.MetricUnaggregatable( - metrics, 'No granularity match') - else: - granularities_in_common = [granularity] - - if resample and granularity: - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, granularity, - 
from_timestamp, to_timestamp) - for metric in metrics]) - for i, ts in enumerate(tss): - tss[i] = ts.resample(resample) - else: - tss = self._map_in_thread(self._get_measures_timeserie, - [(metric, aggregation, g, - from_timestamp, to_timestamp) - for metric in metrics - for g in granularities_in_common]) - - try: - return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v) - for timestamp, r, v - in carbonara.AggregatedTimeSerie.aggregated( - tss, reaggregation, from_timestamp, to_timestamp, - needed_overlap, fill)] - except carbonara.UnAggregableTimeseries as e: - raise storage.MetricUnaggregatable(metrics, e.reason) - - def _find_measure(self, metric, aggregation, granularity, predicate, - from_timestamp, to_timestamp): - timeserie = self._get_measures_timeserie( - metric, aggregation, granularity, - from_timestamp, to_timestamp) - values = timeserie.fetch(from_timestamp, to_timestamp) - return {metric: - [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), - g, value) - for timestamp, g, value in values - if predicate(value)]} - - def search_value(self, metrics, query, from_timestamp=None, - to_timestamp=None, aggregation='mean', - granularity=None): - granularity = granularity or [] - predicate = storage.MeasureQuery(query) - - results = self._map_in_thread( - self._find_measure, - [(metric, aggregation, - gran, predicate, - from_timestamp, to_timestamp) - for metric in metrics - for gran in granularity or - (defin.granularity - for defin in metric.archive_policy.definition)]) - result = collections.defaultdict(list) - for r in results: - for metric, metric_result in six.iteritems(r): - result[metric].extend(metric_result) - - # Sort the result - for metric, r in six.iteritems(result): - # Sort by timestamp asc, granularity desc - r.sort(key=lambda t: (t[0], - t[1])) - - return result - - @staticmethod - def _map_no_thread(method, list_of_args): - return list(itertools.starmap(method, list_of_args)) - - def _map_in_futures_threads(self, method, list_of_args): - with futures.ThreadPoolExecutor( - max_workers=self.aggregation_workers_number) as executor: - # We use 'list' to iterate all threads here to raise the first - # exception now, not much choice - return list(executor.map(lambda args: method(*args), list_of_args)) diff --git a/gnocchi/storage/ceph.py b/gnocchi/storage/ceph.py deleted file mode 100644 index 4de4d1b5c..000000000 --- a/gnocchi/storage/ceph.py +++ /dev/null @@ -1,203 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.storage.common import ceph - - -OPTS = [ - cfg.StrOpt('ceph_pool', - default='gnocchi', - help='Ceph pool name to use.'), - cfg.StrOpt('ceph_username', - help='Ceph username (ie: admin without "client." 
prefix).'), - cfg.StrOpt('ceph_secret', help='Ceph key', secret=True), - cfg.StrOpt('ceph_keyring', help='Ceph keyring path.'), - cfg.IntOpt('ceph_timeout', help='Ceph connection timeout'), - cfg.StrOpt('ceph_conffile', - default='/etc/ceph/ceph.conf', - help='Ceph configuration file.'), -] - -rados = ceph.rados - - -class CephStorage(_carbonara.CarbonaraBasedStorage): - WRITE_FULL = False - - def __init__(self, conf, incoming): - super(CephStorage, self).__init__(conf, incoming) - self.rados, self.ioctx = ceph.create_rados_connection(conf) - - def stop(self): - ceph.close_rados_connection(self.rados, self.ioctx) - super(CephStorage, self).stop() - - @staticmethod - def _get_object_name(metric, timestamp_key, aggregation, granularity, - version=3): - name = str("gnocchi_%s_%s_%s_%s" % ( - metric.id, timestamp_key, aggregation, granularity)) - return name + '_v%s' % version if version else name - - def _object_exists(self, name): - try: - self.ioctx.stat(name) - return True - except rados.ObjectNotFound: - return False - - def _create_metric(self, metric): - name = self._build_unaggregated_timeserie_path(metric, 3) - if self._object_exists(name): - raise storage.MetricAlreadyExists(metric) - else: - self.ioctx.write_full(name, b"") - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) - if offset is None: - self.ioctx.write_full(name, data) - else: - self.ioctx.write(name, data, offset=offset) - with rados.WriteOpCtx() as op: - self.ioctx.set_omap(op, (name,), (b"",)) - self.ioctx.operate_write_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) - - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) - - try: - self.ioctx.remove_object(name) - except rados.ObjectNotFound: - # It's possible that we already remove that object and then crashed - # before removing it from the OMAP key list; then no big deal - # anyway. - pass - - with rados.WriteOpCtx() as op: - self.ioctx.remove_omap_keys(op, (name,)) - self.ioctx.operate_write_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) - - def _delete_metric(self, metric): - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) - try: - self.ioctx.operate_read_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) - except rados.ObjectNotFound: - return - - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... 
- try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - return - - ops = [self.ioctx.aio_remove(name) for name, _ in omaps] - - for op in ops: - op.wait_for_complete_and_cb() - - try: - self.ioctx.remove_object( - self._build_unaggregated_timeserie_path(metric, 3)) - except rados.ObjectNotFound: - # It's possible that the object does not exists - pass - - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): - try: - name = self._get_object_name(metric, timestamp_key, - aggregation, granularity, version) - return self._get_object_content(name) - except rados.ObjectNotFound: - if self._object_exists( - self._build_unaggregated_timeserie_path(metric, 3)): - raise storage.AggregationDoesNotExist(metric, aggregation) - else: - raise storage.MetricDoesNotExist(metric) - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1) - try: - self.ioctx.operate_read_op( - op, self._build_unaggregated_timeserie_path(metric, 3)) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... - try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - - keys = set() - for name, value in omaps: - meta = name.split('_') - if (aggregation == meta[3] and granularity == float(meta[4]) - and self._version_check(name, version)): - keys.add(meta[2]) - return keys - - @staticmethod - def _build_unaggregated_timeserie_path(metric, version): - return (('gnocchi_%s_none' % metric.id) - + ("_v%s" % version if version else "")) - - def _get_unaggregated_timeserie(self, metric, version=3): - try: - return self._get_object_content( - self._build_unaggregated_timeserie_path(metric, version)) - except rados.ObjectNotFound: - raise storage.MetricDoesNotExist(metric) - - def _store_unaggregated_timeserie(self, metric, data, version=3): - self.ioctx.write_full( - self._build_unaggregated_timeserie_path(metric, version), data) - - def _get_object_content(self, name): - offset = 0 - content = b'' - while True: - data = self.ioctx.read(name, offset=offset) - if not data: - break - content += data - offset += len(data) - return content diff --git a/gnocchi/storage/common/__init__.py b/gnocchi/storage/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/storage/common/ceph.py b/gnocchi/storage/common/ceph.py deleted file mode 100644 index b1c9b6739..000000000 --- a/gnocchi/storage/common/ceph.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
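
# NOTE: _get_object_content above loops because ioctx.read() returns at
# most one chunk per call; reading continues until an empty buffer
# signals end-of-object. A minimal, self-contained sketch of the same
# accumulate-until-empty pattern (read_chunk is a stand-in for
# ioctx.read, not part of the real driver):

def read_all(read_chunk, chunk_size=8192):
    """Accumulate chunks until the reader returns an empty buffer."""
    offset = 0
    content = b''
    while True:
        data = read_chunk(offset, chunk_size)
        if not data:
            break
        content += data
        offset += len(data)
    return content

# Example with an in-memory buffer standing in for a RADOS object:
blob = b'x' * 20000
assert read_all(lambda off, n: blob[off:off + n]) == blob
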
- -import errno - -from oslo_log import log - -LOG = log.getLogger(__name__) - - -for RADOS_MODULE_NAME in ('cradox', 'rados'): - try: - rados = __import__(RADOS_MODULE_NAME) - except ImportError: - pass - else: - break -else: - RADOS_MODULE_NAME = None - rados = None - -if rados is not None and hasattr(rados, 'run_in_thread'): - rados.run_in_thread = lambda target, args, timeout=None: target(*args) - LOG.info("rados.run_in_thread is monkeypatched.") - - -def create_rados_connection(conf): - options = {} - if conf.ceph_keyring: - options['keyring'] = conf.ceph_keyring - if conf.ceph_secret: - options['key'] = conf.ceph_secret - if conf.ceph_timeout: - options['rados_osd_op_timeout'] = conf.ceph_timeout - options['rados_mon_op_timeout'] = conf.ceph_timeout - options['client_mount_timeout'] = conf.ceph_timeout - - if not rados: - raise ImportError("No module named 'rados' nor 'cradox'") - - if not hasattr(rados, 'OmapIterator'): - raise ImportError("Your rados python module does not support " - "omap feature. Install 'cradox' (recommended) " - "or upgrade 'python-rados' >= 9.1.0 ") - - LOG.info("Ceph storage backend use '%s' python library", - RADOS_MODULE_NAME) - - # NOTE(sileht): librados handles reconnection itself, - # by default if a call timeout (30sec), it raises - # a rados.Timeout exception, and librados - # still continues to reconnect on the next call - conn = rados.Rados(conffile=conf.ceph_conffile, - rados_id=conf.ceph_username, - conf=options) - conn.connect() - ioctx = conn.open_ioctx(conf.ceph_pool) - return conn, ioctx - - -def close_rados_connection(conn, ioctx): - ioctx.aio_flush() - ioctx.close() - conn.shutdown() - - -# NOTE(sileht): The mapping is not part of the rados Public API So we copy it -# here. -EXCEPTION_NAMES = { - errno.EPERM: 'PermissionError', - errno.ENOENT: 'ObjectNotFound', - errno.EIO: 'IOError', - errno.ENOSPC: 'NoSpace', - errno.EEXIST: 'ObjectExists', - errno.EBUSY: 'ObjectBusy', - errno.ENODATA: 'NoData', - errno.EINTR: 'InterruptedOrTimeoutError', - errno.ETIMEDOUT: 'TimedOut', - errno.EACCES: 'PermissionDeniedError' -} - - -def errno_to_exception(ret): - if ret < 0: - name = EXCEPTION_NAMES.get(abs(ret)) - if name is None: - raise rados.Error("Unhandled error '%s'" % ret) - else: - raise getattr(rados, name) diff --git a/gnocchi/storage/common/redis.py b/gnocchi/storage/common/redis.py deleted file mode 100644 index 8491c369a..000000000 --- a/gnocchi/storage/common/redis.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2017 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
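
# NOTE: errno_to_exception above turns a negative librados return code
# into the rados exception class named in EXCEPTION_NAMES, keyed by the
# absolute errno value. A self-contained illustration of that dispatch
# (FakeRados and its exception classes are hypothetical stand-ins for
# the real rados module):

import errno

class FakeRados(object):
    class Error(Exception):
        pass

    class ObjectNotFound(Error):
        pass

NAMES = {errno.ENOENT: 'ObjectNotFound'}

def demo_errno_to_exception(ret, rados_mod=FakeRados):
    if ret < 0:
        name = NAMES.get(abs(ret))
        if name is None:
            raise rados_mod.Error("Unhandled error '%s'" % ret)
        raise getattr(rados_mod, name)

# demo_errno_to_exception(-errno.ENOENT) raises FakeRados.ObjectNotFound.
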
- -from __future__ import absolute_import - -from six.moves.urllib import parse - -try: - import redis - from redis import sentinel -except ImportError: - redis = None - sentinel = None - -from gnocchi import utils - - -SEP = ':' - -CLIENT_ARGS = frozenset([ - 'db', - 'encoding', - 'retry_on_timeout', - 'socket_keepalive', - 'socket_timeout', - 'ssl', - 'ssl_certfile', - 'ssl_keyfile', - 'sentinel', - 'sentinel_fallback', -]) -""" -Keys that we allow to proxy from the coordinator configuration into the -redis client (used to configure the redis client internals so that -it works as you expect/want it to). - -See: http://redis-py.readthedocs.org/en/latest/#redis.Redis - -See: https://github.com/andymccurdy/redis-py/blob/2.10.3/redis/client.py -""" - -#: Client arguments that are expected/allowed to be lists. -CLIENT_LIST_ARGS = frozenset([ - 'sentinel_fallback', -]) - -#: Client arguments that are expected to be boolean convertible. -CLIENT_BOOL_ARGS = frozenset([ - 'retry_on_timeout', - 'ssl', -]) - -#: Client arguments that are expected to be int convertible. -CLIENT_INT_ARGS = frozenset([ - 'db', - 'socket_keepalive', - 'socket_timeout', -]) - -#: Default socket timeout to use when none is provided. -CLIENT_DEFAULT_SOCKET_TO = 30 - - -def get_client(conf): - if redis is None: - raise RuntimeError("python-redis unavailable") - parsed_url = parse.urlparse(conf.redis_url) - options = parse.parse_qs(parsed_url.query) - - kwargs = {} - if parsed_url.hostname: - kwargs['host'] = parsed_url.hostname - if parsed_url.port: - kwargs['port'] = parsed_url.port - else: - if not parsed_url.path: - raise ValueError("Expected socket path in parsed urls path") - kwargs['unix_socket_path'] = parsed_url.path - if parsed_url.password: - kwargs['password'] = parsed_url.password - - for a in CLIENT_ARGS: - if a not in options: - continue - if a in CLIENT_BOOL_ARGS: - v = utils.strtobool(options[a][-1]) - elif a in CLIENT_LIST_ARGS: - v = options[a] - elif a in CLIENT_INT_ARGS: - v = int(options[a][-1]) - else: - v = options[a][-1] - kwargs[a] = v - if 'socket_timeout' not in kwargs: - kwargs['socket_timeout'] = CLIENT_DEFAULT_SOCKET_TO - - # Ask the sentinel for the current master if there is a - # sentinel arg. - if 'sentinel' in kwargs: - sentinel_hosts = [ - tuple(fallback.split(':')) - for fallback in kwargs.get('sentinel_fallback', []) - ] - sentinel_hosts.insert(0, (kwargs['host'], kwargs['port'])) - sentinel_server = sentinel.Sentinel( - sentinel_hosts, - socket_timeout=kwargs['socket_timeout']) - sentinel_name = kwargs['sentinel'] - del kwargs['sentinel'] - if 'sentinel_fallback' in kwargs: - del kwargs['sentinel_fallback'] - master_client = sentinel_server.master_for(sentinel_name, **kwargs) - # The master_client is a redis.StrictRedis using a - # Sentinel managed connection pool. - return master_client - return redis.StrictRedis(**kwargs) diff --git a/gnocchi/storage/common/s3.py b/gnocchi/storage/common/s3.py deleted file mode 100644 index eb6c0660b..000000000 --- a/gnocchi/storage/common/s3.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log -import tenacity -try: - import boto3 - import botocore.exceptions -except ImportError: - boto3 = None - botocore = None - -from gnocchi import utils - -LOG = log.getLogger(__name__) - - -def retry_if_operationaborted(exception): - return (isinstance(exception, botocore.exceptions.ClientError) - and exception.response['Error'].get('Code') == "OperationAborted") - - -def get_connection(conf): - if boto3 is None: - raise RuntimeError("boto3 unavailable") - conn = boto3.client( - 's3', - endpoint_url=conf.s3_endpoint_url, - region_name=conf.s3_region_name, - aws_access_key_id=conf.s3_access_key_id, - aws_secret_access_key=conf.s3_secret_access_key) - return conn, conf.s3_region_name, conf.s3_bucket_prefix - - -# NOTE(jd) OperationAborted might be raised if we try to create the bucket -# for the first time at the same time -@tenacity.retry( - stop=tenacity.stop_after_attempt(10), - wait=tenacity.wait_fixed(0.5), - retry=tenacity.retry_if_exception(retry_if_operationaborted) -) -def create_bucket(conn, name, region_name): - if region_name: - kwargs = dict(CreateBucketConfiguration={ - "LocationConstraint": region_name, - }) - else: - kwargs = {} - return conn.create_bucket(Bucket=name, **kwargs) - - -def bulk_delete(conn, bucket, objects): - # NOTE(jd) The maximum object to delete at once is 1000 - # TODO(jd) Parallelize? - deleted = 0 - for obj_slice in utils.grouper(objects, 1000): - d = { - 'Objects': [{'Key': o} for o in obj_slice], - # FIXME(jd) Use Quiet mode, but s3rver does not seem to - # support it - # 'Quiet': True, - } - response = conn.delete_objects( - Bucket=bucket, - Delete=d) - deleted += len(response['Deleted']) - LOG.debug('%s objects deleted, %s objects skipped', - deleted, len(objects) - deleted) diff --git a/gnocchi/storage/common/swift.py b/gnocchi/storage/common/swift.py deleted file mode 100644 index 5d4ff47ee..000000000 --- a/gnocchi/storage/common/swift.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
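
# NOTE: bulk_delete above relies on utils.grouper to respect the S3
# limit of 1000 keys per delete_objects call. A sketch of such a
# batching helper, assuming the gnocchi utils version behaves roughly
# like this (itertools-based; the real implementation may differ):

import itertools

def grouper(iterable, n):
    """Yield successive lists of at most n items from iterable."""
    it = iter(iterable)
    while True:
        chunk = list(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk

# Example: 2500 keys are deleted in batches of 1000, 1000 and 500.
batches = list(grouper(('key-%d' % i for i in range(2500)), 1000))
assert [len(b) for b in batches] == [1000, 1000, 500]
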
- - -from oslo_log import log -from six.moves.urllib.parse import quote - -try: - from swiftclient import client as swclient - from swiftclient import utils as swift_utils -except ImportError: - swclient = None - swift_utils = None - -from gnocchi import storage -from gnocchi import utils - -LOG = log.getLogger(__name__) - - -@utils.retry -def _get_connection(conf): - return swclient.Connection( - auth_version=conf.swift_auth_version, - authurl=conf.swift_authurl, - preauthtoken=conf.swift_preauthtoken, - user=conf.swift_user, - key=conf.swift_key, - tenant_name=conf.swift_project_name, - timeout=conf.swift_timeout, - os_options={'endpoint_type': conf.swift_endpoint_type, - 'user_domain_name': conf.swift_user_domain_name}, - retries=0) - - -def get_connection(conf): - if swclient is None: - raise RuntimeError("python-swiftclient unavailable") - - return _get_connection(conf) - - -POST_HEADERS = {'Accept': 'application/json', 'Content-Type': 'text/plain'} - - -def bulk_delete(conn, container, objects): - objects = [quote(('/%s/%s' % (container, obj['name'])).encode('utf-8')) - for obj in objects] - resp = {} - headers, body = conn.post_account( - headers=POST_HEADERS, query_string='bulk-delete', - data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects), - response_dict=resp) - if resp['status'] != 200: - raise storage.StorageError( - "Unable to bulk-delete, is bulk-delete enabled in Swift?") - resp = swift_utils.parse_api_response(headers, body) - LOG.debug('# of objects deleted: %s, # of objects skipped: %s', - resp['Number Deleted'], resp['Number Not Found']) diff --git a/gnocchi/storage/file.py b/gnocchi/storage/file.py deleted file mode 100644 index 3c067befa..000000000 --- a/gnocchi/storage/file.py +++ /dev/null @@ -1,151 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014 Objectif Libre -# Copyright © 2015 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
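
# NOTE: Swift's bulk-delete middleware expects one percent-encoded
# "/container/object" path per line in the request body, which is what
# bulk_delete above builds before POSTing with ?bulk-delete. A minimal
# sketch of just that body construction (no swiftclient needed):

from six.moves.urllib.parse import quote

def bulk_delete_body(container, object_names):
    paths = [quote(('/%s/%s' % (container, name)).encode('utf-8'))
             for name in object_names]
    return b''.join(p.encode('utf-8') + b'\n' for p in paths)

# Example:
assert bulk_delete_body('c', ['a b']) == b'/c/a%20b\n'
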
-import errno -import os -import shutil -import tempfile - -from oslo_config import cfg - -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi import utils - - -OPTS = [ - cfg.StrOpt('file_basepath', - default='/var/lib/gnocchi', - help='Path used to store gnocchi data files.'), -] - - -class FileStorage(_carbonara.CarbonaraBasedStorage): - WRITE_FULL = True - - def __init__(self, conf, incoming): - super(FileStorage, self).__init__(conf, incoming) - self.basepath = conf.file_basepath - self.basepath_tmp = os.path.join(self.basepath, 'tmp') - utils.ensure_paths([self.basepath_tmp]) - - def _atomic_file_store(self, dest, data): - tmpfile = tempfile.NamedTemporaryFile( - prefix='gnocchi', dir=self.basepath_tmp, - delete=False) - tmpfile.write(data) - tmpfile.close() - os.rename(tmpfile.name, dest) - - def _build_metric_dir(self, metric): - return os.path.join(self.basepath, str(metric.id)) - - def _build_unaggregated_timeserie_path(self, metric, version=3): - return os.path.join( - self._build_metric_dir(metric), - 'none' + ("_v%s" % version if version else "")) - - def _build_metric_path(self, metric, aggregation): - return os.path.join(self._build_metric_dir(metric), - "agg_" + aggregation) - - def _build_metric_path_for_split(self, metric, aggregation, - timestamp_key, granularity, version=3): - path = os.path.join(self._build_metric_path(metric, aggregation), - timestamp_key + "_" + str(granularity)) - return path + '_v%s' % version if version else path - - def _create_metric(self, metric): - path = self._build_metric_dir(metric) - try: - os.mkdir(path, 0o750) - except OSError as e: - if e.errno == errno.EEXIST: - raise storage.MetricAlreadyExists(metric) - raise - for agg in metric.archive_policy.aggregation_methods: - try: - os.mkdir(self._build_metric_path(metric, agg), 0o750) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - def _store_unaggregated_timeserie(self, metric, data, version=3): - self._atomic_file_store( - self._build_unaggregated_timeserie_path(metric, version), - data) - - def _get_unaggregated_timeserie(self, metric, version=3): - path = self._build_unaggregated_timeserie_path(metric, version) - try: - with open(path, 'rb') as f: - return f.read() - except IOError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - try: - files = os.listdir(self._build_metric_path(metric, aggregation)) - except OSError as e: - if e.errno == errno.ENOENT: - raise storage.MetricDoesNotExist(metric) - raise - keys = set() - for f in files: - meta = f.split("_") - if meta[1] == str(granularity) and self._version_check(f, version): - keys.add(meta[0]) - return keys - - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - os.unlink(self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version)) - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - self._atomic_file_store( - self._build_metric_path_for_split(metric, aggregation, - timestamp_key, granularity, - version), - data) - - def _delete_metric(self, metric): - path = self._build_metric_dir(metric) - try: - shutil.rmtree(path) - except OSError as e: - if e.errno != errno.ENOENT: - # NOTE(jd) Maybe the metric has never been created (no - # measures) - raise - - def _get_measures(self, metric, timestamp_key, aggregation, 
granularity, - version=3): - path = self._build_metric_path_for_split( - metric, aggregation, timestamp_key, granularity, version) - try: - with open(path, 'rb') as aggregation_file: - return aggregation_file.read() - except IOError as e: - if e.errno == errno.ENOENT: - if os.path.exists(self._build_metric_dir(metric)): - raise storage.AggregationDoesNotExist(metric, aggregation) - raise storage.MetricDoesNotExist(metric) - raise diff --git a/gnocchi/storage/incoming/__init__.py b/gnocchi/storage/incoming/__init__.py deleted file mode 100644 index eb99ae4da..000000000 --- a/gnocchi/storage/incoming/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2017 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from gnocchi import exceptions - - -class ReportGenerationError(Exception): - pass - - -class StorageDriver(object): - - @staticmethod - def __init__(conf): - pass - - @staticmethod - def upgrade(indexer): - pass - - def add_measures(self, metric, measures): - """Add a measure to a metric. - - :param metric: The metric measured. - :param measures: The actual measures. - """ - self.add_measures_batch({metric: measures}) - - @staticmethod - def add_measures_batch(metrics_and_measures): - """Add a batch of measures for some metrics. - - :param metrics_and_measures: A dict where keys - are metrics and value are measure. - """ - raise exceptions.NotImplementedError - - def measures_report(details=True): - """Return a report of pending to process measures. - - Only useful for drivers that process measurements in background - - :return: {'summary': {'metrics': count, 'measures': count}, - 'details': {metric_id: pending_measures_count}} - """ - raise exceptions.NotImplementedError - - @staticmethod - def list_metric_with_measures_to_process(sack): - raise NotImplementedError diff --git a/gnocchi/storage/incoming/_carbonara.py b/gnocchi/storage/incoming/_carbonara.py deleted file mode 100644 index e20720d6b..000000000 --- a/gnocchi/storage/incoming/_carbonara.py +++ /dev/null @@ -1,138 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
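
# NOTE: the incoming drivers below serialize each measure as a
# little-endian (uint64 nanosecond timestamp, float64 value) pair via
# _MEASURE_SERIAL_FORMAT = "Qd". A self-contained round-trip sketch of
# that encoding (standalone; the real driver goes through pandas for
# the timestamp conversion):

import struct

FMT = "Qd"
ITEM = struct.calcsize(FMT)

def encode(measures):
    flat = [x for pair in measures for x in pair]
    return struct.pack("<" + FMT * len(measures), *flat)

def decode(data):
    n = len(data) // ITEM
    values = struct.unpack("<" + FMT * n, data)
    return list(zip(values[::2], values[1::2]))

measures = [(1483228800000000000, 41.0), (1483228860000000000, 42.5)]
assert decode(encode(measures)) == measures
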
-from concurrent import futures -import itertools -import struct - -from oslo_log import log -import pandas -import six - -from gnocchi.storage import incoming -from gnocchi import utils - -LOG = log.getLogger(__name__) - -_NUM_WORKERS = utils.get_default_workers() - - -class CarbonaraBasedStorage(incoming.StorageDriver): - MEASURE_PREFIX = "measure" - SACK_PREFIX = "incoming" - CFG_PREFIX = 'gnocchi-config' - CFG_SACKS = 'sacks' - _MEASURE_SERIAL_FORMAT = "Qd" - _MEASURE_SERIAL_LEN = struct.calcsize(_MEASURE_SERIAL_FORMAT) - - @property - def NUM_SACKS(self): - if not hasattr(self, '_num_sacks'): - try: - self._num_sacks = int(self.get_storage_sacks()) - except Exception as e: - LOG.error('Unable to detect the number of storage sacks. ' - 'Ensure gnocchi-upgrade has been executed: %s', e) - raise - return self._num_sacks - - def get_sack_prefix(self, num_sacks=None): - sacks = num_sacks if num_sacks else self.NUM_SACKS - return self.SACK_PREFIX + str(sacks) + '-%s' - - def upgrade(self, index, num_sacks): - super(CarbonaraBasedStorage, self).upgrade(index) - if not self.get_storage_sacks(): - self.set_storage_settings(num_sacks) - - @staticmethod - def get_storage_sacks(): - """Return the number of sacks in storage. None if not set.""" - raise NotImplementedError - - @staticmethod - def set_storage_settings(num_sacks): - raise NotImplementedError - - @staticmethod - def remove_sack_group(num_sacks): - raise NotImplementedError - - @staticmethod - def get_sack_lock(coord, sack): - lock_name = b'gnocchi-sack-%s-lock' % str(sack).encode('ascii') - return coord.get_lock(lock_name) - - def _unserialize_measures(self, measure_id, data): - nb_measures = len(data) // self._MEASURE_SERIAL_LEN - try: - measures = struct.unpack( - "<" + self._MEASURE_SERIAL_FORMAT * nb_measures, data) - except struct.error: - LOG.error( - "Unable to decode measure %s, possible data corruption", - measure_id) - raise - return six.moves.zip( - pandas.to_datetime(measures[::2], unit='ns'), - itertools.islice(measures, 1, len(measures), 2)) - - def _encode_measures(self, measures): - measures = list(measures) - return struct.pack( - "<" + self._MEASURE_SERIAL_FORMAT * len(measures), - *list(itertools.chain.from_iterable(measures))) - - def add_measures_batch(self, metrics_and_measures): - with futures.ThreadPoolExecutor(max_workers=_NUM_WORKERS) as executor: - list(executor.map( - lambda args: self._store_new_measures(*args), - ((metric, self._encode_measures(measures)) - for metric, measures - in six.iteritems(metrics_and_measures)))) - - @staticmethod - def _store_new_measures(metric, data): - raise NotImplementedError - - def measures_report(self, details=True): - metrics, measures, full_details = self._build_report(details) - report = {'summary': {'metrics': metrics, 'measures': measures}} - if full_details is not None: - report['details'] = full_details - return report - - @staticmethod - def _build_report(details): - raise NotImplementedError - - @staticmethod - def delete_unprocessed_measures_for_metric_id(metric_id): - raise NotImplementedError - - @staticmethod - def process_measure_for_metric(metric): - raise NotImplementedError - - @staticmethod - def has_unprocessed(metric): - raise NotImplementedError - - def sack_for_metric(self, metric_id): - return metric_id.int % self.NUM_SACKS - - def get_sack_name(self, sack): - return self.get_sack_prefix() % sack diff --git a/gnocchi/storage/incoming/ceph.py b/gnocchi/storage/incoming/ceph.py deleted file mode 100644 index 15777a522..000000000 --- 
a/gnocchi/storage/incoming/ceph.py +++ /dev/null @@ -1,225 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from collections import defaultdict -import contextlib -import datetime -import json -import uuid - -import six - -from gnocchi.storage.common import ceph -from gnocchi.storage.incoming import _carbonara - -rados = ceph.rados - - -class CephStorage(_carbonara.CarbonaraBasedStorage): - - Q_LIMIT = 1000 - - def __init__(self, conf): - super(CephStorage, self).__init__(conf) - self.rados, self.ioctx = ceph.create_rados_connection(conf) - # NOTE(sileht): constants can't be class attributes because - # they rely on presence of rados module - - # NOTE(sileht): We allow to read the measure object on - # outdated replicats, that safe for us, we will - # get the new stuffs on next metricd pass. - self.OMAP_READ_FLAGS = (rados.LIBRADOS_OPERATION_BALANCE_READS | - rados.LIBRADOS_OPERATION_SKIPRWLOCKS) - - # NOTE(sileht): That should be safe to manipulate the omap keys - # with any OSDs at the same times, each osd should replicate the - # new key to others and same thing for deletion. - # I wonder how ceph handle rm_omap and set_omap run at same time - # on the same key. I assume the operation are timestamped so that will - # be same. If not, they are still one acceptable race here, a rm_omap - # can finish before all replicats of set_omap are done, but we don't - # care, if that occurs next metricd run, will just remove it again, no - # object with the measure have already been delected by previous, so - # we are safe and good. 
- self.OMAP_WRITE_FLAGS = rados.LIBRADOS_OPERATION_SKIPRWLOCKS - - def stop(self): - ceph.close_rados_connection(self.rados, self.ioctx) - super(CephStorage, self).stop() - - def get_storage_sacks(self): - try: - return json.loads( - self.ioctx.read(self.CFG_PREFIX).decode())[self.CFG_SACKS] - except rados.ObjectNotFound: - return - - def set_storage_settings(self, num_sacks): - self.ioctx.write_full(self.CFG_PREFIX, - json.dumps({self.CFG_SACKS: num_sacks}).encode()) - - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): - try: - self.ioctx.remove_object(prefix % i) - except rados.ObjectNotFound: - pass - - def add_measures_batch(self, metrics_and_measures): - data_by_sack = defaultdict(lambda: defaultdict(list)) - for metric, measures in six.iteritems(metrics_and_measures): - name = "_".join(( - self.MEASURE_PREFIX, - str(metric.id), - str(uuid.uuid4()), - datetime.datetime.utcnow().strftime("%Y%m%d_%H:%M:%S"))) - sack = self.get_sack_name(self.sack_for_metric(metric.id)) - data_by_sack[sack]['names'].append(name) - data_by_sack[sack]['measures'].append( - self._encode_measures(measures)) - - ops = [] - for sack, data in data_by_sack.items(): - with rados.WriteOpCtx() as op: - # NOTE(sileht): list all objects in a pool is too slow with - # many objects (2min for 20000 objects in 50osds cluster), - # and enforce us to iterrate over all objects - # So we create an object MEASURE_PREFIX, that have as - # omap the list of objects to process (not xattr because - # it doesn't # allow to configure the locking behavior) - self.ioctx.set_omap(op, tuple(data['names']), - tuple(data['measures'])) - ops.append(self.ioctx.operate_aio_write_op( - op, sack, flags=self.OMAP_WRITE_FLAGS)) - while ops: - op = ops.pop() - op.wait_for_complete() - - def _build_report(self, details): - metrics = set() - count = 0 - metric_details = defaultdict(int) - for i in six.moves.range(self.NUM_SACKS): - marker = "" - while True: - names = list(self._list_keys_to_process( - i, marker=marker, limit=self.Q_LIMIT)) - if names and names[0] < marker: - raise _carbonara.ReportGenerationError("Unable to cleanly " - "compute backlog.") - for name in names: - count += 1 - metric = name.split("_")[1] - metrics.add(metric) - if details: - metric_details[metric] += 1 - if len(names) < self.Q_LIMIT: - break - else: - marker = name - - return len(metrics), count, metric_details if details else None - - def _list_keys_to_process(self, sack, prefix="", marker="", limit=-1): - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, marker, prefix, limit) - try: - self.ioctx.operate_read_op( - op, self.get_sack_name(sack), flag=self.OMAP_READ_FLAGS) - except rados.ObjectNotFound: - # API have still written nothing - return () - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... 
- try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - return () - - return (k for k, v in omaps) - - def list_metric_with_measures_to_process(self, sack): - names = set() - marker = "" - while True: - obj_names = list(self._list_keys_to_process( - sack, marker=marker, limit=self.Q_LIMIT)) - names.update(name.split("_")[1] for name in obj_names) - if len(obj_names) < self.Q_LIMIT: - break - else: - marker = obj_names[-1] - return names - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - sack = self.sack_for_metric(metric_id) - key_prefix = self.MEASURE_PREFIX + "_" + str(metric_id) - keys = tuple(self._list_keys_to_process(sack, key_prefix)) - - if not keys: - return - - # Now clean objects and omap - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! - self.ioctx.remove_omap_keys(op, keys) - self.ioctx.operate_write_op(op, self.get_sack_name(sack), - flags=self.OMAP_WRITE_FLAGS) - - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - object_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - return bool(self._list_keys_to_process(sack, object_prefix)) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) - key_prefix = self.MEASURE_PREFIX + "_" + str(metric.id) - - measures = [] - processed_keys = [] - with rados.ReadOpCtx() as op: - omaps, ret = self.ioctx.get_omap_vals(op, "", key_prefix, -1) - self.ioctx.operate_read_op(op, self.get_sack_name(sack), - flag=self.OMAP_READ_FLAGS) - # NOTE(sileht): after reading the libradospy, I'm - # not sure that ret will have the correct value - # get_omap_vals transforms the C int to python int - # before operate_read_op is called, I dunno if the int - # content is copied during this transformation or if - # this is a pointer to the C int, I think it's copied... - try: - ceph.errno_to_exception(ret) - except rados.ObjectNotFound: - # Object has been deleted, so this is just a stalled entry - # in the OMAP listing, ignore - return - for k, v in omaps: - measures.extend(self._unserialize_measures(k, v)) - processed_keys.append(k) - - yield measures - - # Now clean omap - with rados.WriteOpCtx() as op: - # NOTE(sileht): come on Ceph, no return code - # for this operation ?!! - self.ioctx.remove_omap_keys(op, tuple(processed_keys)) - self.ioctx.operate_write_op(op, self.get_sack_name(sack), - flags=self.OMAP_WRITE_FLAGS) diff --git a/gnocchi/storage/incoming/file.py b/gnocchi/storage/incoming/file.py deleted file mode 100644 index 781d3ec5c..000000000 --- a/gnocchi/storage/incoming/file.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
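
# NOTE: process_measure_for_metric above is a context manager that
# yields the pending measures and only removes the backing omap keys
# after the with-block completes, so a crash while processing leaves
# the queue intact. A generic sketch of this consume-then-acknowledge
# shape (queue is a plain dict standing in for the omap):

import contextlib

@contextlib.contextmanager
def consume(queue, key_prefix):
    keys = [k for k in queue if k.startswith(key_prefix)]
    yield [queue[k] for k in keys]
    # Only reached when the caller's block raised no exception.
    for k in keys:
        del queue[k]

queue = {'measure_a_1': 1, 'measure_a_2': 2, 'measure_b_1': 3}
with consume(queue, 'measure_a') as items:
    assert sorted(items) == [1, 2]
assert list(queue) == ['measure_b_1']
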
-import contextlib -import datetime -import errno -import json -import os -import shutil -import tempfile -import uuid - -import six - -from gnocchi.storage.incoming import _carbonara -from gnocchi import utils - - -class FileStorage(_carbonara.CarbonaraBasedStorage): - def __init__(self, conf): - super(FileStorage, self).__init__(conf) - self.basepath = conf.file_basepath - self.basepath_tmp = os.path.join(self.basepath, 'tmp') - - def upgrade(self, index, num_sacks): - super(FileStorage, self).upgrade(index, num_sacks) - utils.ensure_paths([self.basepath_tmp]) - - def get_storage_sacks(self): - try: - with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), - 'r') as f: - return json.load(f)[self.CFG_SACKS] - except IOError as e: - if e.errno == errno.ENOENT: - return - raise - - def set_storage_settings(self, num_sacks): - data = {self.CFG_SACKS: num_sacks} - with open(os.path.join(self.basepath_tmp, self.CFG_PREFIX), 'w') as f: - json.dump(data, f) - utils.ensure_paths([self._sack_path(i) - for i in six.moves.range(self.NUM_SACKS)]) - - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): - shutil.rmtree(os.path.join(self.basepath, prefix % i)) - - def _sack_path(self, sack): - return os.path.join(self.basepath, self.get_sack_name(sack)) - - def _measure_path(self, sack, metric_id): - return os.path.join(self._sack_path(sack), six.text_type(metric_id)) - - def _build_measure_path(self, metric_id, random_id=None): - sack = self.sack_for_metric(metric_id) - path = self._measure_path(sack, metric_id) - if random_id: - if random_id is True: - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - random_id = six.text_type(uuid.uuid4()) + now - return os.path.join(path, random_id) - return path - - def _store_new_measures(self, metric, data): - tmpfile = tempfile.NamedTemporaryFile( - prefix='gnocchi', dir=self.basepath_tmp, - delete=False) - tmpfile.write(data) - tmpfile.close() - path = self._build_measure_path(metric.id, True) - while True: - try: - os.rename(tmpfile.name, path) - break - except OSError as e: - if e.errno != errno.ENOENT: - raise - try: - os.mkdir(self._build_measure_path(metric.id)) - except OSError as e: - # NOTE(jd) It's possible that another process created the - # path just before us! In this case, good for us, let's do - # nothing then! 
(see bug #1475684) - if e.errno != errno.EEXIST: - raise - - def _build_report(self, details): - metric_details = {} - for i in six.moves.range(self.NUM_SACKS): - for metric in self.list_metric_with_measures_to_process(i): - metric_details[metric] = len( - self._list_measures_container_for_metric_id_str(i, metric)) - return (len(metric_details.keys()), sum(metric_details.values()), - metric_details if details else None) - - def list_metric_with_measures_to_process(self, sack): - return set(self._list_target(self._sack_path(sack))) - - def _list_measures_container_for_metric_id_str(self, sack, metric_id): - return self._list_target(self._measure_path(sack, metric_id)) - - def _list_measures_container_for_metric_id(self, metric_id): - return self._list_target(self._build_measure_path(metric_id)) - - @staticmethod - def _list_target(target): - try: - return os.listdir(target) - except OSError as e: - # Some other process treated this one, then do nothing - if e.errno == errno.ENOENT: - return [] - raise - - def _delete_measures_files_for_metric_id(self, metric_id, files): - for f in files: - try: - os.unlink(self._build_measure_path(metric_id, f)) - except OSError as e: - # Another process deleted it in the meantime, no prob' - if e.errno != errno.ENOENT: - raise - try: - os.rmdir(self._build_measure_path(metric_id)) - except OSError as e: - # ENOENT: ok, it has been removed at almost the same time - # by another process - # ENOTEMPTY: ok, someone pushed measure in the meantime, - # we'll delete the measures and directory later - # EEXIST: some systems use this instead of ENOTEMPTY - if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST): - raise - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - files = self._list_measures_container_for_metric_id(metric_id) - self._delete_measures_files_for_metric_id(metric_id, files) - - def has_unprocessed(self, metric): - return os.path.isdir(self._build_measure_path(metric.id)) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - files = self._list_measures_container_for_metric_id(metric.id) - measures = [] - for f in files: - abspath = self._build_measure_path(metric.id, f) - with open(abspath, "rb") as e: - measures.extend(self._unserialize_measures(f, e.read())) - - yield measures - - self._delete_measures_files_for_metric_id(metric.id, files) diff --git a/gnocchi/storage/incoming/redis.py b/gnocchi/storage/incoming/redis.py deleted file mode 100644 index 9e81327c8..000000000 --- a/gnocchi/storage/incoming/redis.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2017 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
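
# NOTE: _delete_measures_files_for_metric_id above deliberately ignores
# ENOENT/ENOTEMPTY/EEXIST on the final rmdir, because another process
# may have removed the directory or pushed new measures concurrently.
# A small sketch of that tolerant-cleanup idiom in isolation:

import errno
import os

def rmdir_if_empty(path):
    """Remove path, ignoring the benign concurrent-access errnos."""
    try:
        os.rmdir(path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.EEXIST):
            raise

# rmdir_if_empty('/nonexistent/dir') returns silently (ENOENT).
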
-import collections -import contextlib - -import six - -from gnocchi.storage.common import redis -from gnocchi.storage.incoming import _carbonara - - -class RedisStorage(_carbonara.CarbonaraBasedStorage): - - def __init__(self, conf): - super(RedisStorage, self).__init__(conf) - self._client = redis.get_client(conf) - - def get_storage_sacks(self): - return self._client.hget(self.CFG_PREFIX, self.CFG_SACKS) - - def set_storage_settings(self, num_sacks): - self._client.hset(self.CFG_PREFIX, self.CFG_SACKS, num_sacks) - - @staticmethod - def remove_sack_group(num_sacks): - # NOTE(gordc): redis doesn't maintain keys with empty values - pass - - def _build_measure_path(self, metric_id): - return redis.SEP.join([ - self.get_sack_name(self.sack_for_metric(metric_id)), - six.text_type(metric_id)]) - - def _store_new_measures(self, metric, data): - path = self._build_measure_path(metric.id) - self._client.rpush(path, data) - - def _build_report(self, details): - match = redis.SEP.join([self.get_sack_name("*"), "*"]) - metric_details = collections.defaultdict(int) - for key in self._client.scan_iter(match=match, count=1000): - metric = key.decode('utf8').split(redis.SEP)[1] - metric_details[metric] = self._client.llen(key) - return (len(metric_details.keys()), sum(metric_details.values()), - metric_details if details else None) - - def list_metric_with_measures_to_process(self, sack): - match = redis.SEP.join([self.get_sack_name(sack), "*"]) - keys = self._client.scan_iter(match=match, count=1000) - return set([k.decode('utf8').split(redis.SEP)[1] for k in keys]) - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - self._client.delete(self._build_measure_path(metric_id)) - - def has_unprocessed(self, metric): - return bool(self._client.exists(self._build_measure_path(metric.id))) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - key = self._build_measure_path(metric.id) - item_len = self._client.llen(key) - # lrange is inclusive on both ends, decrease to grab exactly n items - item_len = item_len - 1 if item_len else item_len - measures = [] - for i, data in enumerate(self._client.lrange(key, 0, item_len)): - measures.extend(self._unserialize_measures( - '%s-%s' % (metric.id, i), data)) - - yield measures - - # ltrim is inclusive, bump 1 to remove up to and including nth item - self._client.ltrim(key, item_len + 1, -1) diff --git a/gnocchi/storage/incoming/s3.py b/gnocchi/storage/incoming/s3.py deleted file mode 100644 index 89de4192f..000000000 --- a/gnocchi/storage/incoming/s3.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
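
# NOTE: the redis driver above has to compensate for LRANGE and LTRIM
# using inclusive indexes on both ends: with llen pending items it reads
# lrange(key, 0, llen - 1) and later drops exactly those items with
# ltrim(key, llen, -1). The same arithmetic on a plain list, as a sanity
# check of the bounds:

def lrange(lst, start, stop):
    # Redis-style inclusive slice; stop=-1 means "through the end".
    return lst[start:] if stop == -1 else lst[start:stop + 1]

backlog = ['m1', 'm2', 'm3']
item_len = len(backlog)
read = lrange(backlog, 0, item_len - 1)      # all three items
remaining = lrange(backlog, item_len, -1)    # what LTRIM would keep
assert read == ['m1', 'm2', 'm3'] and remaining == []
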
-from collections import defaultdict -import contextlib -import datetime -import json -import uuid - -import six - -from gnocchi.storage.common import s3 -from gnocchi.storage.incoming import _carbonara - -boto3 = s3.boto3 -botocore = s3.botocore - - -class S3Storage(_carbonara.CarbonaraBasedStorage): - - def __init__(self, conf): - super(S3Storage, self).__init__(conf) - self.s3, self._region_name, self._bucket_prefix = ( - s3.get_connection(conf) - ) - - self._bucket_name_measures = ( - self._bucket_prefix + "-" + self.MEASURE_PREFIX - ) - - def get_storage_sacks(self): - try: - response = self.s3.get_object(Bucket=self._bucket_name_measures, - Key=self.CFG_PREFIX) - return json.loads(response['Body'].read().decode())[self.CFG_SACKS] - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - return - - def set_storage_settings(self, num_sacks): - data = {self.CFG_SACKS: num_sacks} - self.s3.put_object(Bucket=self._bucket_name_measures, - Key=self.CFG_PREFIX, - Body=json.dumps(data).encode()) - - def get_sack_prefix(self, num_sacks=None): - # NOTE(gordc): override to follow s3 partitioning logic - return '%s-' + ('%s/' % (num_sacks if num_sacks else self.NUM_SACKS)) - - @staticmethod - def remove_sack_group(num_sacks): - # nothing to cleanup since sacks are part of path - pass - - def upgrade(self, indexer, num_sacks): - try: - s3.create_bucket(self.s3, self._bucket_name_measures, - self._region_name) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') not in ( - "BucketAlreadyExists", "BucketAlreadyOwnedByYou" - ): - raise - # need to create bucket first to store storage settings object - super(S3Storage, self).upgrade(indexer, num_sacks) - - def _store_new_measures(self, metric, data): - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - self.s3.put_object( - Bucket=self._bucket_name_measures, - Key=(self.get_sack_name(self.sack_for_metric(metric.id)) - + six.text_type(metric.id) + "/" - + six.text_type(uuid.uuid4()) + now), - Body=data) - - def _build_report(self, details): - metric_details = defaultdict(int) - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - **kwargs) - # FIXME(gordc): this can be streamlined if not details - for c in response.get('Contents', ()): - if c['Key'] != self.CFG_PREFIX: - __, metric, metric_file = c['Key'].split("/", 2) - metric_details[metric] += 1 - return (len(metric_details), sum(metric_details.values()), - metric_details if details else None) - - def list_metric_with_measures_to_process(self, sack): - limit = 1000 # 1000 is the default anyway - metrics = set() - response = {} - # Handle pagination - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - Prefix=self.get_sack_name(sack), - Delimiter="/", - MaxKeys=limit, - **kwargs) - for p in response.get('CommonPrefixes', ()): - metrics.add(p['Prefix'].split('/', 2)[1]) - return metrics - - def _list_measure_files_for_metric_id(self, sack, metric_id): - files = set() - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 
'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - response = self.s3.list_objects_v2( - Bucket=self._bucket_name_measures, - Prefix=(self.get_sack_name(sack) - + six.text_type(metric_id) + "/"), - **kwargs) - - for c in response.get('Contents', ()): - files.add(c['Key']) - - return files - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - sack = self.sack_for_metric(metric_id) - files = self._list_measure_files_for_metric_id(sack, metric_id) - s3.bulk_delete(self.s3, self._bucket_name_measures, files) - - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - return bool(self._list_measure_files_for_metric_id(sack, metric.id)) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) - files = self._list_measure_files_for_metric_id(sack, metric.id) - - measures = [] - for f in files: - response = self.s3.get_object( - Bucket=self._bucket_name_measures, - Key=f) - measures.extend( - self._unserialize_measures(f, response['Body'].read())) - - yield measures - - # Now clean objects - s3.bulk_delete(self.s3, self._bucket_name_measures, files) diff --git a/gnocchi/storage/incoming/swift.py b/gnocchi/storage/incoming/swift.py deleted file mode 100644 index 304126f9a..000000000 --- a/gnocchi/storage/incoming/swift.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
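
# NOTE: the listing methods above all repeat the same list_objects_v2
# pagination dance (loop while IsTruncated, feeding
# NextContinuationToken back in). A generic helper showing just that
# loop, assuming a boto3 s3 client (this helper is illustrative and not
# part of the original driver):

def iter_s3_contents(s3, **list_kwargs):
    """Yield every Contents entry across paginated list_objects_v2."""
    response = {}
    while response.get('IsTruncated', True):
        kwargs = dict(list_kwargs)
        if 'NextContinuationToken' in response:
            kwargs['ContinuationToken'] = response['NextContinuationToken']
        response = s3.list_objects_v2(**kwargs)
        for entry in response.get('Contents', ()):
            yield entry

# Usage: for c in iter_s3_contents(s3, Bucket='bucket', Prefix='p/'): ...
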
-from collections import defaultdict -import contextlib -import datetime -import json -import uuid - -import six - -from gnocchi.storage.common import swift -from gnocchi.storage.incoming import _carbonara - -swclient = swift.swclient -swift_utils = swift.swift_utils - - -class SwiftStorage(_carbonara.CarbonaraBasedStorage): - def __init__(self, conf): - super(SwiftStorage, self).__init__(conf) - self.swift = swift.get_connection(conf) - - def get_storage_sacks(self): - try: - __, data = self.swift.get_object(self.CFG_PREFIX, self.CFG_PREFIX) - return json.loads(data)[self.CFG_SACKS] - except swclient.ClientException as e: - if e.http_status == 404: - return - - def set_storage_settings(self, num_sacks): - self.swift.put_container(self.CFG_PREFIX) - self.swift.put_object(self.CFG_PREFIX, self.CFG_PREFIX, - json.dumps({self.CFG_SACKS: num_sacks})) - for i in six.moves.range(self.NUM_SACKS): - self.swift.put_container(self.get_sack_name(i)) - - def remove_sack_group(self, num_sacks): - prefix = self.get_sack_prefix(num_sacks) - for i in six.moves.xrange(num_sacks): - self.swift.delete_container(prefix % i) - - def _store_new_measures(self, metric, data): - now = datetime.datetime.utcnow().strftime("_%Y%m%d_%H:%M:%S") - self.swift.put_object( - self.get_sack_name(self.sack_for_metric(metric.id)), - six.text_type(metric.id) + "/" + six.text_type(uuid.uuid4()) + now, - data) - - def _build_report(self, details): - metric_details = defaultdict(int) - nb_metrics = 0 - measures = 0 - for i in six.moves.range(self.NUM_SACKS): - if details: - headers, files = self.swift.get_container( - self.get_sack_name(i), full_listing=True) - for f in files: - metric, __ = f['name'].split("/", 1) - metric_details[metric] += 1 - else: - headers, files = self.swift.get_container( - self.get_sack_name(i), delimiter='/', full_listing=True) - nb_metrics += len(files) - measures += int(headers.get('x-container-object-count')) - return (nb_metrics or len(metric_details), measures, - metric_details if details else None) - - def list_metric_with_measures_to_process(self, sack): - headers, files = self.swift.get_container( - self.get_sack_name(sack), delimiter='/', full_listing=True) - return set(f['subdir'][:-1] for f in files if 'subdir' in f) - - def _list_measure_files_for_metric_id(self, sack, metric_id): - headers, files = self.swift.get_container( - self.get_sack_name(sack), path=six.text_type(metric_id), - full_listing=True) - return files - - def delete_unprocessed_measures_for_metric_id(self, metric_id): - sack = self.sack_for_metric(metric_id) - files = self._list_measure_files_for_metric_id(sack, metric_id) - swift.bulk_delete(self.swift, self.get_sack_name(sack), files) - - def has_unprocessed(self, metric): - sack = self.sack_for_metric(metric.id) - return bool(self._list_measure_files_for_metric_id(sack, metric.id)) - - @contextlib.contextmanager - def process_measure_for_metric(self, metric): - sack = self.sack_for_metric(metric.id) - sack_name = self.get_sack_name(sack) - files = self._list_measure_files_for_metric_id(sack, metric.id) - - measures = [] - for f in files: - headers, data = self.swift.get_object(sack_name, f['name']) - measures.extend(self._unserialize_measures(f['name'], data)) - - yield measures - - # Now clean objects - swift.bulk_delete(self.swift, sack_name, files) diff --git a/gnocchi/storage/redis.py b/gnocchi/storage/redis.py deleted file mode 100644 index fc2c63ad5..000000000 --- a/gnocchi/storage/redis.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 
2017 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_config import cfg - -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.storage.common import redis - - -OPTS = [ - cfg.StrOpt('redis_url', - default='redis://localhost:6379/', - help='Redis URL'), -] - - -class RedisStorage(_carbonara.CarbonaraBasedStorage): - WRITE_FULL = True - - STORAGE_PREFIX = "timeseries" - FIELD_SEP = '_' - - def __init__(self, conf, incoming): - super(RedisStorage, self).__init__(conf, incoming) - self._client = redis.get_client(conf) - - def _metric_key(self, metric): - return redis.SEP.join([self.STORAGE_PREFIX, str(metric.id)]) - - @staticmethod - def _unaggregated_field(version=3): - return 'none' + ("_v%s" % version if version else "") - - @classmethod - def _aggregated_field_for_split(cls, aggregation, timestamp_key, - granularity, version=3): - path = cls.FIELD_SEP.join([timestamp_key, aggregation, - str(granularity)]) - return path + '_v%s' % version if version else path - - def _create_metric(self, metric): - key = self._metric_key(metric) - if self._client.exists(key): - raise storage.MetricAlreadyExists(metric) - self._client.hset(key, self._unaggregated_field(), '') - - def _store_unaggregated_timeserie(self, metric, data, version=3): - self._client.hset(self._metric_key(metric), - self._unaggregated_field(version), data) - - def _get_unaggregated_timeserie(self, metric, version=3): - data = self._client.hget(self._metric_key(metric), - self._unaggregated_field(version)) - if data is None: - raise storage.MetricDoesNotExist(metric) - return data - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - key = self._metric_key(metric) - if not self._client.exists(key): - raise storage.MetricDoesNotExist(metric) - split_keys = set() - hashes = self._client.hscan_iter( - key, match=self._aggregated_field_for_split(aggregation, '*', - granularity, version)) - for f, __ in hashes: - meta = f.decode("utf8").split(self.FIELD_SEP, 1) - split_keys.add(meta[0]) - return split_keys - - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - key = self._metric_key(metric) - field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - self._client.hdel(key, field) - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - key = self._metric_key(metric) - field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - self._client.hset(key, field, data) - - def _delete_metric(self, metric): - self._client.delete(self._metric_key(metric)) - - # Carbonara API - - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): - key = self._metric_key(metric) - field = self._aggregated_field_for_split( - aggregation, timestamp_key, granularity, version) - data = self._client.hget(key, field) - if data is None: - if not self._client.exists(key): - 
raise storage.MetricDoesNotExist(metric) - raise storage.AggregationDoesNotExist(metric, aggregation) - return data diff --git a/gnocchi/storage/s3.py b/gnocchi/storage/s3.py deleted file mode 100644 index 59c801de1..000000000 --- a/gnocchi/storage/s3.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016-2017 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import os - -from oslo_config import cfg -import tenacity - -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.storage.common import s3 - -boto3 = s3.boto3 -botocore = s3.botocore - -OPTS = [ - cfg.StrOpt('s3_endpoint_url', - help='S3 endpoint URL'), - cfg.StrOpt('s3_region_name', - default=os.getenv("AWS_DEFAULT_REGION"), - help='S3 region name'), - cfg.StrOpt('s3_access_key_id', - default=os.getenv("AWS_ACCESS_KEY_ID"), - help='S3 access key id'), - cfg.StrOpt('s3_secret_access_key', - default=os.getenv("AWS_SECRET_ACCESS_KEY"), - help='S3 secret access key'), - cfg.StrOpt('s3_bucket_prefix', - # Max bucket length is 63 and we use "-" as separator - # 63 - 1 - len(uuid) = 26 - max_length=26, - default='gnocchi', - help='Prefix to namespace metric bucket.'), - cfg.FloatOpt('s3_check_consistency_timeout', - min=0, - default=60, - help="Maximum time to wait checking data consistency when " - "writing to S3. 
Set to 0 to disable data consistency " - "validation."), -] - - -def retry_if_operationaborted(exception): - return (isinstance(exception, botocore.exceptions.ClientError) - and exception.response['Error'].get('Code') == "OperationAborted") - - -class S3Storage(_carbonara.CarbonaraBasedStorage): - - WRITE_FULL = True - - _consistency_wait = tenacity.wait_exponential(multiplier=0.1) - - def __init__(self, conf, incoming): - super(S3Storage, self).__init__(conf, incoming) - self.s3, self._region_name, self._bucket_prefix = ( - s3.get_connection(conf) - ) - self._bucket_name = '%s-aggregates' % self._bucket_prefix - if conf.s3_check_consistency_timeout > 0: - self._consistency_stop = tenacity.stop_after_delay( - conf.s3_check_consistency_timeout) - else: - self._consistency_stop = None - - def upgrade(self, index, num_sacks): - super(S3Storage, self).upgrade(index, num_sacks) - try: - s3.create_bucket(self.s3, self._bucket_name, self._region_name) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') != "BucketAlreadyExists": - raise - - @staticmethod - def _object_name(split_key, aggregation, granularity, version=3): - name = '%s_%s_%s' % (aggregation, granularity, split_key) - return name + '_v%s' % version if version else name - - @staticmethod - def _prefix(metric): - return str(metric.id) + '/' - - def _create_metric(self, metric): - pass - - def _put_object_safe(self, Bucket, Key, Body): - put = self.s3.put_object(Bucket=Bucket, Key=Key, Body=Body) - - if self._consistency_stop: - - def _head(): - return self.s3.head_object(Bucket=Bucket, - Key=Key, IfMatch=put['ETag']) - - tenacity.Retrying( - retry=tenacity.retry_if_result( - lambda r: r['ETag'] != put['ETag']), - wait=self._consistency_wait, - stop=self._consistency_stop)(_head) - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=0, version=3): - self._put_object_safe( - Bucket=self._bucket_name, - Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version), - Body=data) - - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - self.s3.delete_object( - Bucket=self._bucket_name, - Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version)) - - def _delete_metric(self, metric): - bucket = self._bucket_name - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - try: - response = self.s3.list_objects_v2( - Bucket=bucket, Prefix=self._prefix(metric), **kwargs) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - # Maybe it never has been created (no measure) - return - raise - s3.bulk_delete(self.s3, bucket, - [c['Key'] for c in response.get('Contents', ())]) - - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): - try: - response = self.s3.get_object( - Bucket=self._bucket_name, - Key=self._prefix(metric) + self._object_name( - timestamp_key, aggregation, granularity, version)) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == 'NoSuchKey': - try: - response = self.s3.list_objects_v2( - Bucket=self._bucket_name, Prefix=self._prefix(metric)) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == 'NoSuchKey': - raise storage.MetricDoesNotExist(metric) 
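# (If the listing failed with anything other than NoSuchKey, the bare
# raise below propagates that error unchanged; a listing that succeeds
# means the metric prefix still holds objects, so only this aggregation's
# split is missing and AggregationDoesNotExist is raised instead.)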
- raise - raise storage.AggregationDoesNotExist(metric, aggregation) - raise - return response['Body'].read() - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - bucket = self._bucket_name - keys = set() - response = {} - while response.get('IsTruncated', True): - if 'NextContinuationToken' in response: - kwargs = { - 'ContinuationToken': response['NextContinuationToken'] - } - else: - kwargs = {} - try: - response = self.s3.list_objects_v2( - Bucket=bucket, - Prefix=self._prefix(metric) + '%s_%s' % (aggregation, - granularity), - **kwargs) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - raise storage.MetricDoesNotExist(metric) - raise - for f in response.get('Contents', ()): - try: - meta = f['Key'].split('_') - if (self._version_check(f['Key'], version)): - keys.add(meta[2]) - except (ValueError, IndexError): - # Might be "none", or any other file. Be resilient. - continue - return keys - - @staticmethod - def _build_unaggregated_timeserie_path(metric, version): - return S3Storage._prefix(metric) + 'none' + ("_v%s" % version - if version else "") - - def _get_unaggregated_timeserie(self, metric, version=3): - try: - response = self.s3.get_object( - Bucket=self._bucket_name, - Key=self._build_unaggregated_timeserie_path(metric, version)) - except botocore.exceptions.ClientError as e: - if e.response['Error'].get('Code') == "NoSuchKey": - raise storage.MetricDoesNotExist(metric) - raise - return response['Body'].read() - - def _store_unaggregated_timeserie(self, metric, data, version=3): - self._put_object_safe( - Bucket=self._bucket_name, - Key=self._build_unaggregated_timeserie_path(metric, version), - Body=data) diff --git a/gnocchi/storage/swift.py b/gnocchi/storage/swift.py deleted file mode 100644 index 52dadbdbb..000000000 --- a/gnocchi/storage/swift.py +++ /dev/null @@ -1,185 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
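# A minimal sketch (illustration only, not shipped code) of how the three
# Carbonara drivers in this patch name one aggregate split: the Redis driver
# earlier, the S3 driver above and the Swift driver below all join the split
# key, aggregation name and granularity with "_" plus a "_v3" format-version
# suffix, namespaced by the metric id. The "gnocchi" prefixes and the
# "timeseries" key are the defaults defined in those modules; the Redis key
# separator shown here as ":" is an assumption (the real one comes from
# gnocchi.storage.common.redis.SEP).
def example_split_locations(metric_id, split_key, aggregation, granularity):
    # S3: a single "<prefix>-aggregates" bucket, keys namespaced by metric
    # id, aggregation name first (see _prefix/_object_name above).
    s3 = "gnocchi-aggregates/%s/%s_%s_%s_v3" % (
        metric_id, aggregation, granularity, split_key)
    # Swift: one "<prefix>.<metric id>" container per metric, split key
    # first (see _container_name/_object_name below).
    swift = ("gnocchi.%s" % metric_id,
             "%s_%s_%s_v3" % (split_key, aggregation, granularity))
    # Redis: one hash per metric, fields named like the Swift objects
    # (see RedisStorage._metric_key/_aggregated_field_for_split earlier).
    redis = ("timeseries:%s" % metric_id,
             "%s_%s_%s_v3" % (split_key, aggregation, granularity))
    return s3, swift, redis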
- -from oslo_config import cfg - -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.storage.common import swift - -swclient = swift.swclient -swift_utils = swift.swift_utils - -OPTS = [ - cfg.StrOpt('swift_auth_version', - default='1', - help='Swift authentication version to use.'), - cfg.StrOpt('swift_preauthurl', - help='Swift pre-auth URL.'), - cfg.StrOpt('swift_authurl', - default="http://localhost:8080/auth/v1.0", - help='Swift auth URL.'), - cfg.StrOpt('swift_preauthtoken', - secret=True, - help='Swift token to use to authenticate.'), - cfg.StrOpt('swift_user', - default="admin:admin", - help='Swift user.'), - cfg.StrOpt('swift_user_domain_name', - default='Default', - help='Swift user domain name.'), - cfg.StrOpt('swift_key', - secret=True, - default="admin", - help='Swift key/password.'), - cfg.StrOpt('swift_project_name', - help='Swift tenant name, only used in v2/v3 auth.', - deprecated_name="swift_tenant_name"), - cfg.StrOpt('swift_project_domain_name', - default='Default', - help='Swift project domain name.'), - cfg.StrOpt('swift_container_prefix', - default='gnocchi', - help='Prefix to namespace metric containers.'), - cfg.StrOpt('swift_endpoint_type', - default='publicURL', - help='Endpoint type to connect to Swift',), - cfg.IntOpt('swift_timeout', - min=0, - default=300, - help='Connection timeout in seconds.'), -] - - -class SwiftStorage(_carbonara.CarbonaraBasedStorage): - - WRITE_FULL = True - - def __init__(self, conf, incoming): - super(SwiftStorage, self).__init__(conf, incoming) - self.swift = swift.get_connection(conf) - self._container_prefix = conf.swift_container_prefix - - def _container_name(self, metric): - return '%s.%s' % (self._container_prefix, str(metric.id)) - - @staticmethod - def _object_name(split_key, aggregation, granularity, version=3): - name = '%s_%s_%s' % (split_key, aggregation, granularity) - return name + '_v%s' % version if version else name - - def _create_metric(self, metric): - # TODO(jd) A container per user in their account? - resp = {} - self.swift.put_container(self._container_name(metric), - response_dict=resp) - # put_container() should return 201 Created; if it returns 204, that - # means the metric was already created! - if resp['status'] == 204: - raise storage.MetricAlreadyExists(metric) - - def _store_metric_measures(self, metric, timestamp_key, aggregation, - granularity, data, offset=None, version=3): - self.swift.put_object( - self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity, - version), - data) - - def _delete_metric_measures(self, metric, timestamp_key, aggregation, - granularity, version=3): - self.swift.delete_object( - self._container_name(metric), - self._object_name(timestamp_key, aggregation, granularity, - version)) - - def _delete_metric(self, metric): - container = self._container_name(metric) - try: - headers, files = self.swift.get_container( - container, full_listing=True) - except swclient.ClientException as e: - if e.http_status != 404: - # Maybe it never has been created (no measure) - raise - else: - swift.bulk_delete(self.swift, container, files) - try: - self.swift.delete_container(container) - except swclient.ClientException as e: - if e.http_status != 404: - # Deleted in the meantime? Whatever.
- raise - - def _get_measures(self, metric, timestamp_key, aggregation, granularity, - version=3): - try: - headers, contents = self.swift.get_object( - self._container_name(metric), self._object_name( - timestamp_key, aggregation, granularity, version)) - except swclient.ClientException as e: - if e.http_status == 404: - try: - self.swift.head_container(self._container_name(metric)) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - raise storage.AggregationDoesNotExist(metric, aggregation) - raise - return contents - - def _list_split_keys_for_metric(self, metric, aggregation, granularity, - version=3): - container = self._container_name(metric) - try: - headers, files = self.swift.get_container( - container, full_listing=True) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - keys = set() - for f in files: - try: - meta = f['name'].split('_') - if (aggregation == meta[1] and granularity == float(meta[2]) - and self._version_check(f['name'], version)): - keys.add(meta[0]) - except (ValueError, IndexError): - # Might be "none", or any other file. Be resilient. - continue - return keys - - @staticmethod - def _build_unaggregated_timeserie_path(version): - return 'none' + ("_v%s" % version if version else "") - - def _get_unaggregated_timeserie(self, metric, version=3): - try: - headers, contents = self.swift.get_object( - self._container_name(metric), - self._build_unaggregated_timeserie_path(version)) - except swclient.ClientException as e: - if e.http_status == 404: - raise storage.MetricDoesNotExist(metric) - raise - return contents - - def _store_unaggregated_timeserie(self, metric, data, version=3): - self.swift.put_object(self._container_name(metric), - self._build_unaggregated_timeserie_path(version), - data) diff --git a/gnocchi/tempest/__init__.py b/gnocchi/tempest/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tempest/config.py b/gnocchi/tempest/config.py deleted file mode 100644 index 74d7ef3ea..000000000 --- a/gnocchi/tempest/config.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
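# The module below only declares options; the plugin in
# gnocchi/tempest/plugin.py registers them with tempest's config. With the
# defaults defined here, a tempest.conf would effectively contain the
# following (an illustrative rendering of the defaults, not a file shipped
# anywhere):
#
#     [service_available]
#     gnocchi = True
#
#     [metric]
#     catalog_type = metric
#     endpoint_type = publicURL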
- -from oslo_config import cfg - -service_option = cfg.BoolOpt('gnocchi', - default=True, - help="Whether or not Gnocchi is expected to be " - "available") - -metric_group = cfg.OptGroup(name='metric', - title='Metric Service Options') - -metric_opts = [ - cfg.StrOpt('catalog_type', - default='metric', - help="Catalog type of the Metric service."), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the metric service."), -] diff --git a/gnocchi/tempest/plugin.py b/gnocchi/tempest/plugin.py deleted file mode 100644 index 3410471ff..000000000 --- a/gnocchi/tempest/plugin.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import - -import os - -from tempest.test_discover import plugins - -import gnocchi -from gnocchi.tempest import config as tempest_config - - -class GnocchiTempestPlugin(plugins.TempestPlugin): - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(gnocchi.__file__)))[0] - test_dir = "gnocchi/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - conf.register_opt(tempest_config.service_option, - group='service_available') - conf.register_group(tempest_config.metric_group) - conf.register_opts(tempest_config.metric_opts, group='metric') - - def get_opt_lists(self): - return [(tempest_config.metric_group.name, - tempest_config.metric_opts), - ('service_available', [tempest_config.service_option])] diff --git a/gnocchi/tempest/scenario/__init__.py b/gnocchi/tempest/scenario/__init__.py deleted file mode 100644 index 7db0fd6f2..000000000 --- a/gnocchi/tempest/scenario/__init__.py +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
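# The plugin above wires Gnocchi into tempest's test discovery; the module
# below then builds one test method per YAML file found in
# gnocchi/tests/functional_live/gabbits. For example, with the two files
# shipped there, live.yaml becomes GnocchiGabbiTest.test_live and
# search-resource.yaml becomes GnocchiGabbiTest.test_search_resource,
# per the test_maker naming rule at the bottom of this module.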
- -from __future__ import absolute_import - -import os -import unittest - -from gabbi import runner -from gabbi import suitemaker -from gabbi import utils -import six.moves.urllib.parse as urlparse -from tempest import config -import tempest.test - -CONF = config.CONF - -TEST_DIR = os.path.join(os.path.dirname(__file__), '..', '..', - 'tests', 'functional_live', 'gabbits') - - -class GnocchiGabbiTest(tempest.test.BaseTestCase): - credentials = ['admin'] - - TIMEOUT_SCALING_FACTOR = 5 - - @classmethod - def skip_checks(cls): - super(GnocchiGabbiTest, cls).skip_checks() - if not CONF.service_available.gnocchi: - raise cls.skipException("Gnocchi support is required") - - def _do_test(self, filename): - token = self.os_admin.auth_provider.get_token() - url = self.os_admin.auth_provider.base_url( - {'service': CONF.metric.catalog_type, - 'endpoint_type': CONF.metric.endpoint_type}) - - parsed_url = urlparse.urlsplit(url) - prefix = parsed_url.path.rstrip('/') # turn it into a prefix - if parsed_url.scheme == 'https': - port = 443 - require_ssl = True - else: - port = 80 - require_ssl = False - host = parsed_url.hostname - if parsed_url.port: - port = parsed_url.port - - os.environ["GNOCCHI_SERVICE_TOKEN"] = token - os.environ["GNOCCHI_AUTHORIZATION"] = "not used" - - with open(os.path.join(TEST_DIR, filename)) as f: - suite_dict = utils.load_yaml(f) - suite_dict.setdefault('defaults', {})['ssl'] = require_ssl - test_suite = suitemaker.test_suite_from_dict( - loader=unittest.defaultTestLoader, - test_base_name="gabbi", - suite_dict=suite_dict, - test_directory=TEST_DIR, - host=host, port=port, - fixture_module=None, - intercept=None, - prefix=prefix, - handlers=runner.initialize_handlers([]), - test_loader_name="tempest") - - # NOTE(sileht): We hide stdout/stderr and reraise the failure - # manually, tempest will print it itself. - with open(os.devnull, 'w') as stream: - result = unittest.TextTestRunner( - stream=stream, verbosity=0, failfast=True, - ).run(test_suite) - - if not result.wasSuccessful(): - failures = (result.errors + result.failures + - result.unexpectedSuccesses) - if failures: - test, bt = failures[0] - name = test.test_data.get('name', test.id()) - msg = 'From test "%s" :\n%s' % (name, bt) - self.fail(msg) - - self.assertTrue(result.wasSuccessful()) - - -def test_maker(name, filename): - def test(self): - self._do_test(filename) - test.__name__ = name - return test - - -# Create one scenario per yaml file -for filename in os.listdir(TEST_DIR): - if not filename.endswith('.yaml'): - continue - name = "test_%s" % filename[:-5].lower().replace("-", "_") - setattr(GnocchiGabbiTest, name, - test_maker(name, filename)) diff --git a/gnocchi/tests/__init__.py b/gnocchi/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tests/base.py b/gnocchi/tests/base.py deleted file mode 100644 index 3f35b40c9..000000000 --- a/gnocchi/tests/base.py +++ /dev/null @@ -1,335 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2016 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. -import functools -import json -import os -import subprocess -import threading -import uuid - -import fixtures -from oslotest import base -from oslotest import log -from oslotest import output -import six -from six.moves.urllib.parse import unquote -try: - from swiftclient import exceptions as swexc -except ImportError: - swexc = None -from testtools import testcase -from tooz import coordination - -from gnocchi import archive_policy -from gnocchi import exceptions -from gnocchi import indexer -from gnocchi import service -from gnocchi import storage - - -class SkipNotImplementedMeta(type): - def __new__(cls, name, bases, local): - for attr in local: - value = local[attr] - if callable(value) and ( - attr.startswith('test_') or attr == 'setUp'): - local[attr] = _skip_decorator(value) - return type.__new__(cls, name, bases, local) - - -def _skip_decorator(func): - @functools.wraps(func) - def skip_if_not_implemented(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions.NotImplementedError as e: - raise testcase.TestSkipped(six.text_type(e)) - return skip_if_not_implemented - - -class FakeSwiftClient(object): - def __init__(self, *args, **kwargs): - self.kvs = {} - - def put_container(self, container, response_dict=None): - if response_dict is not None: - if container in self.kvs: - response_dict['status'] = 204 - else: - response_dict['status'] = 201 - self.kvs[container] = {} - - def get_container(self, container, delimiter=None, - path=None, full_listing=False, limit=None): - try: - container = self.kvs[container] - except KeyError: - raise swexc.ClientException("No such container", - http_status=404) - - files = [] - directories = set() - for k, v in six.iteritems(container.copy()): - if path and not k.startswith(path): - continue - - if delimiter is not None and delimiter in k: - dirname = k.split(delimiter, 1)[0] - if dirname not in directories: - directories.add(dirname) - files.append({'subdir': dirname + delimiter}) - else: - files.append({'bytes': len(v), - 'last_modified': None, - 'hash': None, - 'name': k, - 'content_type': None}) - - if full_listing: - end = None - elif limit: - end = limit - else: - # In truth, it's 10000, but 1 is enough to make sure our test fails - # otherwise. 
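# (Real Swift caps a single listing at 10000 objects and callers are
# expected to pass full_listing=True to page through everything; truncating
# to one entry here makes any code path that forgets full_listing=True
# fail immediately in tests.)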
- end = 1 - - return ({'x-container-object-count': len(container.keys())}, - (files + list(directories))[:end]) - - def put_object(self, container, key, obj): - if hasattr(obj, "seek"): - obj.seek(0) - obj = obj.read() - # TODO(jd) Maybe we should reset the seek(), but well… - try: - self.kvs[container][key] = obj - except KeyError: - raise swexc.ClientException("No such container", - http_status=404) - - def get_object(self, container, key): - try: - return {}, self.kvs[container][key] - except KeyError: - raise swexc.ClientException("No such container/object", - http_status=404) - - def delete_object(self, container, obj): - try: - del self.kvs[container][obj] - except KeyError: - raise swexc.ClientException("No such container/object", - http_status=404) - - def delete_container(self, container): - if container not in self.kvs: - raise swexc.ClientException("No such container", - http_status=404) - if self.kvs[container]: - raise swexc.ClientException("Container not empty", - http_status=409) - del self.kvs[container] - - def head_container(self, container): - if container not in self.kvs: - raise swexc.ClientException("No such container", - http_status=404) - - def post_account(self, headers, query_string=None, data=None, - response_dict=None): - if query_string == 'bulk-delete': - resp = {'Response Status': '200 OK', - 'Response Body': '', - 'Number Deleted': 0, - 'Number Not Found': 0} - if response_dict is not None: - response_dict['status'] = 200 - if data: - for path in data.splitlines(): - try: - __, container, obj = (unquote(path.decode('utf8')) - .split('/', 2)) - del self.kvs[container][obj] - resp['Number Deleted'] += 1 - except KeyError: - resp['Number Not Found'] += 1 - return {}, json.dumps(resp).encode('utf-8') - - if response_dict is not None: - response_dict['status'] = 204 - - return {}, None - - -@six.add_metaclass(SkipNotImplementedMeta) -class TestCase(base.BaseTestCase): - - REDIS_DB_INDEX = 0 - REDIS_DB_LOCK = threading.Lock() - - ARCHIVE_POLICIES = { - 'no_granularity_match': archive_policy.ArchivePolicy( - "no_granularity_match", - 0, [ - # 2 second resolution for a day - archive_policy.ArchivePolicyItem( - granularity=2, points=3600 * 24), - ], - ), - 'low': archive_policy.ArchivePolicy( - "low", 0, [ - # 5 minute resolution for an hour - archive_policy.ArchivePolicyItem( - granularity=300, points=12), - # 1 hour resolution for a day - archive_policy.ArchivePolicyItem( - granularity=3600, points=24), - # 1 day resolution for a month - archive_policy.ArchivePolicyItem( - granularity=3600 * 24, points=30), - ], - ), - 'medium': archive_policy.ArchivePolicy( - "medium", 0, [ - # 1 minute resolution for a day - archive_policy.ArchivePolicyItem( - granularity=60, points=60 * 24), - # 1 hour resolution for a week - archive_policy.ArchivePolicyItem( - granularity=3600, points=7 * 24), - # 1 day resolution for a year - archive_policy.ArchivePolicyItem( - granularity=3600 * 24, points=365), - ], - ), - 'high': archive_policy.ArchivePolicy( - "high", 0, [ - # 1 second resolution for an hour - archive_policy.ArchivePolicyItem( - granularity=1, points=3600), - # 1 minute resolution for a week - archive_policy.ArchivePolicyItem( - granularity=60, points=60 * 24 * 7), - # 1 hour resolution for a year - archive_policy.ArchivePolicyItem( - granularity=3600, points=365 * 24), - ], - ), - } - - @classmethod - def setUpClass(self): - super(TestCase, self).setUpClass() - - # NOTE(sileht): oslotest does this in setUp() but we - # need it here - self.output = output.CaptureOutput()
- self.output.setUp() - self.log = log.ConfigureLogging() - self.log.setUp() - - self.conf = service.prepare_service([], - default_config_files=[]) - py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..',)) - self.conf.set_override('paste_config', - os.path.join(py_root, 'rest', 'api-paste.ini'), - group="api") - self.conf.set_override('policy_file', - os.path.join(py_root, 'rest', 'policy.json'), - group="oslo_policy") - - # NOTE(jd) This allows to test S3 on AWS - if not os.getenv("AWS_ACCESS_KEY_ID"): - self.conf.set_override('s3_endpoint_url', - os.getenv("GNOCCHI_STORAGE_HTTP_URL"), - group="storage") - self.conf.set_override('s3_access_key_id', "gnocchi", - group="storage") - self.conf.set_override('s3_secret_access_key', "anythingworks", - group="storage") - - self.index = indexer.get_driver(self.conf) - self.index.connect() - - # NOTE(jd) So, some driver, at least SQLAlchemy, can't create all - # their tables in a single transaction even with the - # checkfirst=True, so what we do here is we force the upgrade code - # path to be sequential to avoid race conditions as the tests run - # in parallel. - self.coord = coordination.get_coordinator( - self.conf.storage.coordination_url, - str(uuid.uuid4()).encode('ascii')) - - self.coord.start(start_heart=True) - - with self.coord.get_lock(b"gnocchi-tests-db-lock"): - self.index.upgrade() - - self.coord.stop() - - self.archive_policies = self.ARCHIVE_POLICIES.copy() - for name, ap in six.iteritems(self.archive_policies): - # Create basic archive policies - try: - self.index.create_archive_policy(ap) - except indexer.ArchivePolicyAlreadyExists: - pass - - storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file") - self.conf.set_override('driver', storage_driver, 'storage') - if storage_driver == 'ceph': - self.conf.set_override('ceph_conffile', - os.getenv("CEPH_CONF"), - 'storage') - - def setUp(self): - super(TestCase, self).setUp() - if swexc: - self.useFixture(fixtures.MockPatch( - 'swiftclient.client.Connection', - FakeSwiftClient)) - - if self.conf.storage.driver == 'file': - tempdir = self.useFixture(fixtures.TempDir()) - self.conf.set_override('file_basepath', - tempdir.path, - 'storage') - elif self.conf.storage.driver == 'ceph': - pool_name = uuid.uuid4().hex - subprocess.call("rados -c %s mkpool %s" % ( - os.getenv("CEPH_CONF"), pool_name), shell=True) - self.conf.set_override('ceph_pool', pool_name, 'storage') - - # Override the bucket prefix to be unique to avoid concurrent access - # with any other test - self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26], - "storage") - - self.storage = storage.get_driver(self.conf) - - if self.conf.storage.driver == 'redis': - # Create one prefix per test - self.storage.STORAGE_PREFIX = str(uuid.uuid4()) - self.storage.incoming.SACK_PREFIX = str(uuid.uuid4()) - - self.storage.upgrade(self.index, 128) - - def tearDown(self): - self.index.disconnect() - self.storage.stop() - super(TestCase, self).tearDown() diff --git a/gnocchi/tests/functional/__init__.py b/gnocchi/tests/functional/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tests/functional/fixtures.py b/gnocchi/tests/functional/fixtures.py deleted file mode 100644 index 900041944..000000000 --- a/gnocchi/tests/functional/fixtures.py +++ /dev/null @@ -1,189 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Fixtures for use with gabbi tests.""" - -import os -import shutil -import tempfile -import threading -import time -from unittest import case -import warnings - -from gabbi import fixture -from oslo_config import cfg -from oslo_middleware import cors -from oslotest import log -from oslotest import output -import sqlalchemy_utils - -from gnocchi import indexer -from gnocchi.indexer import sqlalchemy -from gnocchi.rest import app -from gnocchi import service -from gnocchi import storage -from gnocchi.tests import utils - - -# NOTE(chdent): Hack to restore semblance of global configuration to -# pass to the WSGI app used per test suite. LOAD_APP_KWARGS is the oslo -# configuration, and the pecan application configuration of -# which the critical part is a reference to the current indexer. -LOAD_APP_KWARGS = None - - -def setup_app(): - global LOAD_APP_KWARGS - return app.load_app(**LOAD_APP_KWARGS) - - -class ConfigFixture(fixture.GabbiFixture): - """Establish the relevant configuration fixture, per test file. - - Each test file gets its own oslo config and its own indexer and storage - instance. The indexer is based on the current database url. The storage - uses a temporary directory. - - To use this fixture in a gabbit add:: - - fixtures: - - ConfigFixture - """ - - def __init__(self): - self.conf = None - self.tmp_dir = None - - def start_fixture(self): - """Create necessary temp files and do the config dance.""" - - self.output = output.CaptureOutput() - self.output.setUp() - self.log = log.ConfigureLogging() - self.log.setUp() - - global LOAD_APP_KWARGS - - data_tmp_dir = tempfile.mkdtemp(prefix='gnocchi') - - if os.getenv("GABBI_LIVE"): - dcf = None - else: - dcf = [] - conf = service.prepare_service([], - default_config_files=dcf) - py_root = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', '..',)) - conf.set_override('paste_config', - os.path.join(py_root, 'rest', 'api-paste.ini'), - group="api") - conf.set_override('policy_file', - os.path.join(py_root, 'rest', 'policy.json'), - group="oslo_policy") - - # NOTE(sileht): This is not concurrency safe, but only this test file - # deals with CORS, so we are fine. set_override doesn't work because - # the cors group doesn't exist yet, and when the CORS middleware is - # created it registers the option and directly copies the values of - # all configuration options, making it impossible to override them - # properly... - cfg.set_defaults(cors.CORS_OPTS, allowed_origin="http://foobar.com") - - self.conf = conf - self.tmp_dir = data_tmp_dir - - if conf.indexer.url is None: - raise case.SkipTest("No indexer configured") - - # Use the presence of DEVSTACK_GATE_TEMPEST as a semaphore - # to signal we are not in a gate driven functional test - # and thus should override conf settings. - if 'DEVSTACK_GATE_TEMPEST' not in os.environ: - conf.set_override('driver', 'file', 'storage') - conf.set_override('file_basepath', data_tmp_dir, 'storage') - - # NOTE(jd) All of that is still very SQL centric but we only support - # SQL for now so let's say it's good enough.
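# Each gabbi test file therefore runs against its own throwaway database:
# _create_new_database() below derives a fresh database from the configured
# indexer URL, and stop_fixture() drops it again with
# sqlalchemy_utils.drop_database(), so test files cannot see each other's
# resources.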
- conf.set_override( - 'url', - sqlalchemy.SQLAlchemyIndexer._create_new_database( - conf.indexer.url), - 'indexer') - - index = indexer.get_driver(conf) - index.connect() - index.upgrade() - - # Set pagination to a testable value - conf.set_override('max_limit', 7, 'api') - # Those tests uses noauth mode - # TODO(jd) Rewrite them for basic - conf.set_override("auth_mode", "noauth", 'api') - - self.index = index - - s = storage.get_driver(conf) - s.upgrade(index, 128) - - LOAD_APP_KWARGS = { - 'storage': s, - 'indexer': index, - 'conf': conf, - } - - # start up a thread to async process measures - self.metricd_thread = MetricdThread(index, s) - self.metricd_thread.start() - - def stop_fixture(self): - """Clean up the config fixture and storage artifacts.""" - if hasattr(self, 'metricd_thread'): - self.metricd_thread.stop() - self.metricd_thread.join() - - if hasattr(self, 'index'): - self.index.disconnect() - - # Swallow noise from missing tables when dropping - # database. - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', - module='sqlalchemy.engine.default') - sqlalchemy_utils.drop_database(self.conf.indexer.url) - - if self.tmp_dir: - shutil.rmtree(self.tmp_dir) - - self.conf.reset() - self.output.cleanUp() - self.log.cleanUp() - - -class MetricdThread(threading.Thread): - """Run metricd in a naive thread to process measures.""" - - def __init__(self, index, storer, name='metricd'): - super(MetricdThread, self).__init__(name=name) - self.index = index - self.storage = storer - self.flag = True - - def run(self): - while self.flag: - metrics = utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.index, metrics) - time.sleep(0.1) - - def stop(self): - self.flag = False diff --git a/gnocchi/tests/functional/gabbits/aggregation.yaml b/gnocchi/tests/functional/gabbits/aggregation.yaml deleted file mode 100644 index 39c31d38b..000000000 --- a/gnocchi/tests/functional/gabbits/aggregation.yaml +++ /dev/null @@ -1,341 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: low - definition: - - granularity: 1 second - - granularity: 300 seconds - status: 201 - -# Aggregation by metric ids - - - name: create metric 1 - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: low - status: 201 - - - name: create metric 2 - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: low - status: 201 - - - name: get metric list - GET: /v1/metric - - - name: push measurements to metric 1 - POST: /v1/metric/$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: push measurements to metric 2 - POST: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[1].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 3.1 - - timestamp: "2015-03-06T14:34:12" - value: 2 - - timestamp: "2015-03-06T14:35:12" - value: 5 - status: 202 - - - name: get measure aggregates by granularity not float - GET: /v1/aggregation/metric?metric=$HISTORY['get metric 
list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=foobar - status: 400 - - - name: get measure aggregates by granularity with refresh - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&refresh=true - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - - name: get measure aggregates by granularity - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - - name: get measure aggregates by granularity with timestamps - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - - name: get measure aggregates and reaggregate - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&reaggregation=min - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:30:00+00:00', 300.0, 2.55] - - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] - - - name: get measure aggregates and resample - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&resample=60 - response_json_paths: - $: - - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - - - name: get measure aggregates with fill zero - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=0 - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] - - - name: get measure aggregates with fill null - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=null - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - ['2015-03-06T14:35:12+00:00', 1.0, 5.0] - - - name: get measure aggregates with fill missing granularity - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&fill=0 - status: 400 - - - name: get measure aggregates with bad fill - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list'].$RESPONSE['$[0].id']&metric=$HISTORY['get metric list'].$RESPONSE['$[1].id']&granularity=1&fill=asdf - status: 400 - - -# Aggregation by resource and metric_name - - - name: post a resource - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: bcd3441c-b5aa-4d1b-af9a-5a72322bb269 - metrics: - agg_meter: - archive_policy_name: low - 
status: 201 - - - name: post another resource - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 1b0a8345-b279-4cb8-bd7a-2cb83193624f - metrics: - agg_meter: - archive_policy_name: low - status: 201 - - - name: push measurements to resource 1 - POST: /v1/resource/generic/bcd3441c-b5aa-4d1b-af9a-5a72322bb269/metric/agg_meter/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: push measurements to resource 2 - POST: /v1/resource/generic/1b0a8345-b279-4cb8-bd7a-2cb83193624f/metric/agg_meter/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 3.1 - - timestamp: "2015-03-06T14:34:12" - value: 2 - - timestamp: "2015-03-06T14:35:12" - value: 5 - status: 202 - - - name: get measure aggregates by granularity from resources with refresh - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&refresh=true - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - - name: get measure aggregates by granularity from resources - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - - name: get measure aggregates by granularity from resources and resample - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&resample=60 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $: - - ['2015-03-06T14:33:00+00:00', 60.0, 23.1] - - ['2015-03-06T14:34:00+00:00', 60.0, 7.0] - - - name: get measure aggregates by granularity from resources and bad resample - POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=abc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: get measure aggregates by granularity from resources and resample no granularity - POST: /v1/aggregation/resource/generic/metric/agg_meter?resample=60 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - response_strings: - - A granularity must be specified to resample - - - name: get measure aggregates by granularity with timestamps from resources - POST: /v1/aggregation/resource/generic/metric/agg_meter?start=2015-03-06T15:33:57%2B01:00&stop=2015-03-06T15:34:00%2B01:00 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - poll: - 
count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:30:00+00:00', 300.0, 15.05] - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - - name: get measure aggregates by granularity from resources and reaggregate - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&reaggregation=min - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 3.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 2.0] - - - name: get measure aggregates from resources with fill zero - POST: /v1/aggregation/resource/generic/metric/agg_meter?granularity=1&fill=0 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $: - - ['2015-03-06T14:33:57+00:00', 1.0, 23.1] - - ['2015-03-06T14:34:12+00:00', 1.0, 7.0] - - ['2015-03-06T14:35:12+00:00', 1.0, 2.5] - - -# Some negative tests - - - name: get measure aggregates with wrong GET - GET: /v1/aggregation/resource/generic/metric/agg_meter - status: 405 - - - name: get measure aggregates with wrong metric_name - POST: /v1/aggregation/resource/generic/metric/notexists - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - response_json_paths: - $.`len`: 0 - - - name: get measure aggregates with wrong resource - POST: /v1/aggregation/resource/notexits/metric/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - response_strings: - - Resource type notexits does not exist - - - name: get measure aggregates with wrong path - POST: /v1/aggregation/re/generic/metric/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - - - name: get measure aggregates with wrong path 2 - POST: /v1/aggregation/resource/generic/notexists/agg_meter - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - - - name: get measure aggregates with no resource name - POST: /v1/aggregation/resource/generic/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 405 diff --git a/gnocchi/tests/functional/gabbits/archive-rule.yaml b/gnocchi/tests/functional/gabbits/archive-rule.yaml deleted file mode 100644 index bc3ea60a7..000000000 --- a/gnocchi/tests/functional/gabbits/archive-rule.yaml +++ /dev/null @@ -1,197 +0,0 @@ -# -## Test the Archive Policy API to achieve coverage of just the -## ArchivePolicyRulesController. 
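## Archive policy rules map metric names to policies via glob patterns:
## when a metric is created by name without an explicit policy, the rule
## whose pattern matches decides which policy it gets, most specific
## pattern first. So a hypothetical metric named disk.foo.read would fall
## under test_rule2's "disk.foo.*" rather than "disk.*" or "*", an
## illustrative reading of the ordering the list request below asserts.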
-## -# -fixtures: - - ConfigFixture - -tests: - -# create dependent policy - - name: create archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: low - definition: - - granularity: 1 hour - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/low - -# Attempt to create an archive policy rule - - - name: create archive policy rule1 - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule1 - metric_pattern: "*" - archive_policy_name: low - status: 201 - response_json_paths: - $.metric_pattern: "*" - $.archive_policy_name: low - $.name: test_rule1 - - - name: create archive policy rule 2 - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule2 - metric_pattern: "disk.foo.*" - archive_policy_name: low - status: 201 - response_json_paths: - $.metric_pattern: disk.foo.* - $.archive_policy_name: low - $.name: test_rule2 - - - name: create archive policy rule 3 - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule3 - metric_pattern: "disk.*" - archive_policy_name: low - status: 201 - response_json_paths: - $.metric_pattern: disk.* - $.archive_policy_name: low - $.name: test_rule3 - - -# Attempt to create an invalid policy rule - - - name: create invalid archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule - metric_pattern: "disk.foo.*" - status: 400 - - - name: missing auth archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - data: - name: test_rule - metric_pattern: "disk.foo.*" - archive_policy_name: low - status: 403 - - - name: wrong content type - POST: /v1/archive_policy_rule - request_headers: - content-type: text/plain - x-roles: admin - status: 415 - response_strings: - - Unsupported Media Type - - - name: wrong auth create rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: foo - data: - name: test_rule_wrong_auth - metric_pattern: "disk.foo.*" - archive_policy_name: low - status: 403 - - - name: missing auth createrule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - data: - name: test_rule_miss_auth - metric_pattern: "disk.foo.*" - archive_policy_name: low - status: 403 - - - name: bad request body - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - whaa: foobar - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - -# get an archive policy rules - - - name: get archive policy rule - GET: /v1/archive_policy_rule - status: 200 - response_json_paths: - $.[0].metric_pattern: disk.foo.* - $.[1].metric_pattern: disk.* - $.[2].metric_pattern: "*" - - - name: get unknown archive policy rule - GET: /v1/archive_policy_rule/foo - status: 404 - - - name: delete used archive policy - DELETE: /v1/archive_policy/low - request_headers: - x-roles: admin - status: 400 - -# delete rule as non admin - - - name: delete archive policy rule non admin - DELETE: /v1/archive_policy_rule/test_rule1 - status: 403 - -# delete rule - - - name: delete archive policy rule1 - DELETE: /v1/archive_policy_rule/test_rule1 - request_headers: - x-roles: admin - status: 204 - - - name: delete archive 
policy rule2 - DELETE: /v1/archive_policy_rule/test_rule2 - request_headers: - x-roles: admin - status: 204 - - - name: delete archive policy rule3 - DELETE: /v1/archive_policy_rule/test_rule3 - request_headers: - x-roles: admin - status: 204 - -# delete again - - - name: confirm delete archive policy rule - DELETE: /v1/archive_policy_rule/test_rule1 - request_headers: - x-roles: admin - status: 404 - - - name: delete missing archive policy rule utf8 - DELETE: /v1/archive_policy_rule/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - request_headers: - x-roles: admin - status: 404 - response_strings: - - Archive policy rule ✔éñ☃ does not exist diff --git a/gnocchi/tests/functional/gabbits/archive.yaml b/gnocchi/tests/functional/gabbits/archive.yaml deleted file mode 100644 index 42fe13c8f..000000000 --- a/gnocchi/tests/functional/gabbits/archive.yaml +++ /dev/null @@ -1,568 +0,0 @@ -# -# Test the Archive Policy API to achieve coverage of just the -# ArchivePoliciesController. -# - -fixtures: - - ConfigFixture - -tests: - -# Retrieve the empty list when there are no archive policies. -# NOTE(chdent): This demonstrates what used to be considered a -# security bug in JSON output: -# http://flask.pocoo.org/docs/0.10/security/#json-security -# The version described there is supposed to be fixed in most modern -# browsers but there is a new version of the problem which is only -# fixed in some: -# http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ -# The caveats point out that this is only an issue if your data is -# sensitive, which in this case...? -# However, the api-wg has made a recommendation that collections -# should be returned as an object with a named key with a value of -# a list as follows: {"archive_policies": [...]} -# This allows for extensibility such as future support for pagination. -# Do we care? - - - name: empty archive policy list - GET: /v1/archive_policy - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: empty list text - GET: /v1/archive_policy - request_headers: - accept: text/plain - status: 406 - - - name: empty list html - GET: /v1/archive_policy - request_headers: - accept: text/html - status: 406 - -# Fail to create an archive policy for various reasons.
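# (The failures below exercise, in order: an unsupported content type (415),
# a disallowed method (405), missing or insufficient authorization (403),
# and several malformed bodies (400).)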
- - - name: wrong content type - POST: /v1/archive_policy - request_headers: - content-type: text/plain - x-roles: admin - status: 415 - response_strings: - - Unsupported Media Type - - - name: wrong method - PUT: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - status: 405 - - - name: wrong authZ - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: clancy - data: - name: medium - definition: - - granularity: 1 second - status: 403 - - - name: missing authZ - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: medium - definition: - - granularity: 1 second - status: 403 - - - name: bad request body - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - cowsay: moo - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - - - name: missing definition - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: medium - status: 400 - response_strings: - - "Invalid input: required key not provided" - - - name: empty definition - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: medium - definition: [] - status: 400 - response_strings: - - "Invalid input: length of value must be at least 1" - - - name: wrong value definition - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: somename - definition: foobar - status: 400 - response_strings: - - "Invalid input: expected a list" - - - name: useless definition - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: medium - definition: - - cowsay: moo - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - -# Create a valid archive policy. 
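# (A definition ties granularity, points and timespan together as
# timespan = granularity × points; the server derives whichever value is
# omitted. For example, granularity "1 second" with 20 points yields the
# "0:00:20" timespan asserted in the GET below, while an entry with only a
# granularity leaves points and timespan null.)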
- - - name: create archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: medium - definition: - - granularity: 1 second - points: 20 - - granularity: 2 second - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/medium - status: 201 - -# Retrieve it correctly and then poorly - - - name: get archive policy - GET: $LOCATION - response_headers: - content-type: /application/json/ - response_json_paths: - $.name: medium - $.definition[0].granularity: "0:00:01" - $.definition[0].points: 20 - $.definition[0].timespan: "0:00:20" - $.definition[1].granularity: "0:00:02" - $.definition[1].points: null - $.definition[1].timespan: null - - - name: get wrong accept - GET: $LAST_URL - request_headers: - accept: text/plain - status: 406 - -# Update archive policy - - - name: patch archive policy with bad definition - PATCH: $LAST_URL - request_headers: - content-type: application/json - x-roles: admin - data: - definition: - - granularity: 1 second - points: 50 - timespan: 1 hour - - granularity: 2 second - status: 400 - response_strings: - - timespan ≠ granularity × points - - - name: patch archive policy with missing granularity - PATCH: $LAST_URL - request_headers: - content-type: application/json - x-roles: admin - data: - definition: - - granularity: 1 second - points: 50 - status: 400 - response_strings: - - "Archive policy medium does not support change: Cannot add or drop granularities" - - - name: patch archive policy with non-matching granularity - PATCH: $LAST_URL - request_headers: - content-type: application/json - x-roles: admin - data: - definition: - - granularity: 5 second - points: 20 - - granularity: 2 second - status: 400 - response_strings: - - "Archive policy medium does not support change: 1.0 granularity interval was changed" - - - name: patch archive policy - PATCH: $LAST_URL - request_headers: - content-type: application/json - x-roles: admin - data: - definition: - - granularity: 1 second - points: 50 - - granularity: 2 second - status: 200 - response_json_paths: - $.name: medium - $.definition[0].granularity: "0:00:01" - $.definition[0].points: 50 - $.definition[0].timespan: "0:00:50" - - - name: get patched archive policy - GET: $LAST_URL - response_headers: - content-type: /application/json/ - response_json_paths: - $.name: medium - $.definition[0].granularity: "0:00:01" - $.definition[0].points: 50 - $.definition[0].timespan: "0:00:50" - -# Unexpected methods - - - name: post single archive - POST: $LAST_URL - status: 405 - - - name: put single archive - PUT: $LAST_URL - status: 405 - -# Create another one and then test duplication - - - name: create second policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: large - definition: - - granularity: 1 hour - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/large - status: 201 - - - name: create duplicate policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: large - definition: - - granularity: 1 hour - status: 409 - response_strings: - - Archive policy large already exists - -# Create a unicode named policy - - - name: post unicode policy name - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: ✔éñ☃ - definition: - - granularity: 1 minute - points: 20 - status: 201 - response_headers: - location: 
$SCHEME://$NETLOC/v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - response_json_paths: - name: ✔éñ☃ - - - name: retrieve unicode policy name - GET: $LOCATION - response_json_paths: - name: ✔éñ☃ - - - name: post small unicode policy name - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: æ - definition: - - granularity: 1 minute - points: 20 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/%C3%A6 - response_json_paths: - name: æ - - - name: retrieve small unicode policy name - GET: $LOCATION - response_json_paths: - name: æ - -# List the collection - - - name: get archive policy list - GET: /v1/archive_policy - response_strings: - - '"name": "medium"' - - '"name": "large"' - response_json_paths: - $[?name = "large"].definition[?granularity = "1:00:00"].points: null - $[?name = "medium"].definition[?granularity = "0:00:02"].points: null - -# Delete one as non-admin - - - name: delete single archive non admin - DELETE: /v1/archive_policy/medium - status: 403 - -# Delete one - - - name: delete single archive - DELETE: /v1/archive_policy/medium - request_headers: - x-roles: admin - status: 204 - -# It really is gone - - - name: confirm delete - GET: $LAST_URL - status: 404 - -# Fail to delete one that does not exist - - - name: delete missing archive - DELETE: /v1/archive_policy/grandiose - request_headers: - x-roles: admin - status: 404 - response_strings: - - Archive policy grandiose does not exist - - - name: delete archive utf8 - DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - request_headers: - x-roles: admin - status: 204 - - - name: delete missing archive utf8 again - DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - request_headers: - x-roles: admin - status: 404 - response_strings: - - Archive policy ✔éñ☃ does not exist - -# Add metric using the policy and then be unable to delete policy - - - name: create metric - POST: /v1/metric - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - data: - archive_policy_name: large - status: 201 - - - name: delete in use policy - DELETE: /v1/archive_policy/large - request_headers: - x-roles: admin - status: 400 - response_strings: - - Archive policy large is still in use - -# Attempt to create illogical policies - - - name: create illogical policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: complex - definition: - - granularity: 1 second - points: 60 - timespan: "0:01:01" - status: 400 - response_strings: - - timespan ≠ granularity × points - - - name: create invalid points policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: complex - definition: - - granularity: 0 - points: 60 - status: 400 - response_strings: - - "Invalid input: not a valid value for dictionary value" - - - name: create invalid granularity policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: complex - definition: - - granularity: 10 - points: 0 - status: 400 - response_strings: - - "Invalid input: not a valid value for dictionary value" - - - name: create identical granularities policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: complex - definition: - - granularity: 1 second - points: 60 - - 
granularity: 1 second - points: 120 - status: 400 - response_strings: - - "More than one archive policy uses granularity `1.0'" - - - name: policy invalid unit - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: 227d0e1f-4295-4e4b-8515-c296c47d71d3 - definition: - - granularity: 1 second - timespan: "1 shenanigan" - status: 400 - -# Non admin user attempt - - - name: fail to create policy non-admin - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-user-id: b45187c5-150b-4730-bcb2-b5e04e234220 - x-project-id: 16764ee0-bffe-4843-aa36-04b002cdbc7c - data: - name: f1d150d9-02ad-4fe7-8872-c64b2bcaaa97 - definition: - - granularity: 1 minute - points: 20 - status: 403 - response_strings: - - Access was denied to this resource - -# Back windows - - - name: policy with back window - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: 7720a99d-cd3b-4aa4-8a6f-935bf0d46ded - back_window: 1 - definition: - - granularity: 10s - points: 20 - status: 201 - response_json_paths: - $.back_window: 1 - $.definition[0].timespan: "0:03:20" - - - name: policy no back window - desc: and default seconds on int granularity - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: 22f2b99f-e629-4170-adc4-09b65635e056 - back_window: 0 - definition: - - granularity: 10 - points: 20 - status: 201 - response_json_paths: - $.back_window: 0 - $.definition[0].points: 20 - $.definition[0].timespan: "0:03:20" - -# Timespan, points, granularity input tests - - - name: policy float granularity - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: 595228db-ea29-4415-9d5b-ecb5366abb1b - definition: - - timespan: 1 hour - points: 1000 - status: 201 - response_json_paths: - $.definition[0].points: 1000 - $.definition[0].granularity: "0:00:04" - $.definition[0].timespan: "1:06:40" - - - name: policy float timespan - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: 6bc72791-a27e-4417-a589-afc6d2067a38 - definition: - - timespan: 1 hour - granularity: 7s - status: 201 - response_json_paths: - $.definition[0].points: 514 - $.definition[0].granularity: "0:00:07" - $.definition[0].timespan: "0:59:58" diff --git a/gnocchi/tests/functional/gabbits/async.yaml b/gnocchi/tests/functional/gabbits/async.yaml deleted file mode 100644 index fd2f97ae7..000000000 --- a/gnocchi/tests/functional/gabbits/async.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# -# Test async processing of measures. 
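The three definition fields above are mutually constrained: a definition is rejected whenever timespan ≠ granularity × points, and when only two of the three are supplied the third is derived, which is what the two float tests expect (1 hour over 1000 points rounds up to a 4-second granularity; 1 hour at 7 seconds truncates to 514 points). A minimal sketch of that arithmetic, reconstructed from the expected responses above rather than taken from gnocchi's code:

import math
from datetime import timedelta

def fill_definition(granularity=None, points=None, timespan=None):
    # Assumption from the asserted responses: a derived granularity rounds
    # up, a derived point count truncates; not gnocchi's actual code.
    if granularity is None:
        granularity = math.ceil(timespan / points)   # 3600 / 1000 -> 4
    elif points is None:
        points = int(timespan // granularity)        # 3600 // 7 -> 514
    return granularity, points, timedelta(seconds=granularity * points)

print(fill_definition(points=1000, timespan=3600))   # (4, 1000, 1:06:40)
print(fill_definition(granularity=7, timespan=3600)) # (7, 514, 0:59:58)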
-# - -fixtures: - - ConfigFixture - -tests: - - - name: create archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: moderate - definition: - - granularity: 1 second - status: 201 - - - name: make a generic resource - POST: /v1/resource/generic - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json - data: - id: 41937416-1644-497d-a0ed-b43d55a2b0ea - started_at: "2015-06-06T02:02:02.000000" - metrics: - some.counter: - archive_policy_name: moderate - status: 201 - - - name: confirm no metrics yet - GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json - response_json_paths: - $: [] - - - name: post some measures - POST: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - content-type: application/json - data: - - timestamp: "2015-06-06T14:33:00" - value: 11 - - timestamp: "2015-06-06T14:35:00" - value: 12 - status: 202 - -# This requires a poll as the measures are not immediately -# aggregated. - - - name: get some measures - GET: /v1/resource/generic/41937416-1644-497d-a0ed-b43d55a2b0ea/metric/some.counter/measures - request_headers: - x-user-id: edca16f4-684e-4a91-85e9-0c1ceecdd147 - x-project-id: e8459971-fae4-4670-8ed3-55dd9139d26d - poll: - count: 50 - delay: .1 - response_strings: - - "2015" - response_json_paths: - $[-1][-1]: 12 diff --git a/gnocchi/tests/functional/gabbits/base.yaml b/gnocchi/tests/functional/gabbits/base.yaml deleted file mode 100644 index ef0977117..000000000 --- a/gnocchi/tests/functional/gabbits/base.yaml +++ /dev/null @@ -1,168 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - -- name: get information on APIs - desc: Root URL must return information about API versions - GET: / - response_headers: - content-type: /^application\/json/ - response_json_paths: - $.versions.[0].id: "v1.0" - $.versions.[0].status: "CURRENT" - -- name: archive policy post success - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: test1 - definition: - - granularity: 1 minute - points: 20 - status: 201 - response_headers: - content-type: /^application\/json/ - location: $SCHEME://$NETLOC/v1/archive_policy/test1 - response_json_paths: - $.name: test1 - $.definition.[0].granularity: 0:01:00 - $.definition.[0].points: 20 - $.definition.[0].timespan: 0:20:00 - -- name: post archive policy no auth - desc: this confirms that auth handling comes before data validation - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - definition: - - granularity: 1 second - points: 20 - status: 403 - -- name: post metric with archive policy - POST: /v1/metric - request_headers: - content-type: application/json - x-roles: admin - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - data: - archive_policy_name: test1 - status: 201 - response_headers: - content-type: /application\/json/ - response_json_paths: - 
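The poll block used in the async test above is gabbi's retry mechanism: because measures are aggregated asynchronously, the GET is re-issued up to count times, delay seconds apart, until its assertions pass. Roughly (an illustrative sketch, not gabbi's implementation):

import time

def poll(request, check, count=50, delay=0.1):
    # Re-run the request until its assertions pass or attempts run out.
    last = None
    for _ in range(count):
        try:
            return check(request())
        except AssertionError as exc:
            last = exc
            time.sleep(delay)
    raise last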
$.archive_policy_name: test1 - -- name: retrieve metric info - GET: $LOCATION - status: 200 - request_headers: - content_type: /application\/json/ - x-roles: admin - response_json_paths: - $.archive_policy.name: test1 - $.created_by_user_id: 93180da9-7c15-40d3-a050-a374551e52ee - $.created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - -- name: list the one metric - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: test1 - -- name: post a single measure - desc: post one measure - POST: /v1/metric/$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - data: - - timestamp: "2013-01-01 23:23:20" - value: 1234.2 - status: 202 - -- name: Get list of resource type and URL - desc: Resources index page should return a list of resource types, each associated with a URL - GET: /v1/resource/ - response_headers: - content-type: /^application\/json/ - status: 200 - response_json_paths: - $.generic: $SCHEME://$NETLOC/v1/resource/generic - -- name: post generic resource - POST: /v1/resource/generic - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - data: - id: 5b7ebe90-4ad2-4c83-ad2c-f6344884ab70 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/5b7ebe90-4ad2-4c83-ad2c-f6344884ab70 - response_json_paths: - type: generic - started_at: "2014-01-03T02:02:02+00:00" - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - -- name: post generic resource bad id - POST: /v1/resource/generic - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - data: - id: 1.2.3.4 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/2d869568-70d4-5ed6-9891-7d7a3bbf572d - response_json_paths: - type: generic - started_at: "2014-01-03T02:02:02+00:00" - project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - created_by_project_id: 99d13f22-3618-4288-82b8-6512ded77e4f - id: 2d869568-70d4-5ed6-9891-7d7a3bbf572d - original_resource_id: 1.2.3.4 - -- name: get status denied - GET: /v1/status - status: 403 - -- name: get status - GET: /v1/status - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - x-roles: admin - response_json_paths: - $.storage.`len`: 2 - -- name: get status, no details - GET: /v1/status?details=False - request_headers: - content-type: application/json - x-user-id: 93180da9-7c15-40d3-a050-a374551e52ee - x-project-id: 99d13f22-3618-4288-82b8-6512ded77e4f - x-roles: admin - response_json_paths: - $.storage.`len`: 1
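The "bad id" test above relies on non-UUID resource ids being rewritten to a deterministic UUID while the original value is preserved in original_resource_id, which is why the Location header is predictable. The mapping is a name-based (version 5) UUID; a sketch, where the namespace constant is an assumption for illustration and not a value confirmed by this patch:

import uuid

# Assumed namespace -- illustrative only, not verified against gnocchi/utils.py.
RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b')

def resource_uuid(value):
    try:
        return uuid.UUID(value)   # already a UUID: used as-is
    except ValueError:
        # '1.2.3.4', 'foobar', ... always hash to the same UUID
        return uuid.uuid5(RESOURCE_ID_NAMESPACE, value)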
x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: simple - definition: - - granularity: 1 second - status: 201 - - - name: create metric - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: simple - status: 201 - - - name: push measurements to metric - POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json - data: - $RESPONSE['$.id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: push measurements to unknown metrics - POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json - data: - 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5C: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 400 - response_strings: - - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5c, 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d" - - - name: push measurements to unknown named metrics - POST: /v1/batch/resources/metrics/measures - request_headers: - content-type: application/json - data: - 37AEC8B7-C0D9-445B-8AB9-D3C6312DCF5D: - cpu_util: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - 46c9418d-d63b-4cdd-be89-8f57ffc5952e: - disk.iops: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 400 - response_strings: - - "Unknown metrics: 37aec8b7-c0d9-445b-8ab9-d3c6312dcf5d/cpu_util, 46c9418d-d63b-4cdd-be89-8f57ffc5952e/disk.iops" - - - name: create second metric - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: simple - status: 201 - - - name: post a resource - POST: /v1/resource/generic - request_headers: - content-type: application/json - data: - id: 46c9418d-d63b-4cdd-be89-8f57ffc5952e - user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - metrics: - disk.iops: - archive_policy_name: simple - cpu_util: - archive_policy_name: simple - status: 201 - - - name: post a second resource - POST: /v1/resource/generic - request_headers: - content-type: application/json - data: - id: f0f6038f-f82c-4f30-8d81-65db8be249fe - user_id: 0fbb2314-8461-4b1a-8013-1fc22f6afc9c - project_id: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - metrics: - net.speed: - archive_policy_name: simple - mem_usage: - archive_policy_name: simple - status: 201 - - - name: list metrics - GET: /v1/metric - - - name: push measurements to two metrics - POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json - data: - $RESPONSE['$[0].id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - $RESPONSE['$[1].id']: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: push measurements to two named metrics - POST: /v1/batch/resources/metrics/measures - request_headers: - content-type: application/json - data: - 46c9418d-d63b-4cdd-be89-8f57ffc5952e: - disk.iops: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - cpu_util: - - 
timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - f0f6038f-f82c-4f30-8d81-65db8be249fe: - mem_usage: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - net.speed: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: create archive policy rule for auto - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: rule_auto - metric_pattern: "auto.*" - archive_policy_name: simple - status: 201 - - - name: push measurements to unknown named metrics and create it - POST: /v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json - data: - 46c9418d-d63b-4cdd-be89-8f57ffc5952e: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: get created metric to check creation - GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test - - - name: ensure measure have been posted - GET: /v1/resource/generic/46c9418d-d63b-4cdd-be89-8f57ffc5952e/metric/auto.test/measures?refresh=true&start=2015-03-06T14:34 - response_json_paths: - $: - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id - POST: /v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json - accept: application/json - data: - aaaaaaaa-d63b-4cdd-be89-111111111111: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - bbbbbbbb-d63b-4cdd-be89-111111111111: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - - status: 400 - response_json_paths: - $.description.cause: "Unknown resources" - $.description.detail[/original_resource_id]: - - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" - resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" - - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" - resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" - - - name: push measurements to unknown named metrics and resource with create_metrics with uuid resource id where resources is several times listed - POST: /v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json - accept: application/json - data: - aaaaaaaa-d63b-4cdd-be89-111111111111: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - auto.test2: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - bbbbbbbb-d63b-4cdd-be89-111111111111: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - - status: 400 - response_json_paths: - $.description.cause: "Unknown resources" - $.description.detail[/original_resource_id]: - - original_resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" - resource_id: "aaaaaaaa-d63b-4cdd-be89-111111111111" - - original_resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" - resource_id: "bbbbbbbb-d63b-4cdd-be89-111111111111" - - - name: push measurements to unknown named metrics and resource with create_metrics with non uuid resource id - POST: 
/v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json - accept: application/json - data: - foobar: - auto.test: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - - status: 400 - response_json_paths: - $.description.cause: "Unknown resources" - $.description.detail: - - resource_id: "6b8e287d-c01a-538c-979b-a819ee49de5d" - original_resource_id: "foobar" - - - name: push measurements to named metrics and resource with create_metrics with wrong measure objects - POST: /v1/batch/resources/metrics/measures?create_metrics=true - request_headers: - content-type: application/json - accept: application/json - data: - 46c9418d-d63b-4cdd-be89-8f57ffc5952e: - auto.test: - - [ "2015-03-06T14:33:57", 43.1] - - [ "2015-03-06T14:34:12", 12] - status: 400 - response_strings: - - "Invalid format for measures" diff --git a/gnocchi/tests/functional/gabbits/cors.yaml b/gnocchi/tests/functional/gabbits/cors.yaml deleted file mode 100644 index bd2395d55..000000000 --- a/gnocchi/tests/functional/gabbits/cors.yaml +++ /dev/null @@ -1,21 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: get CORS headers for non-allowed - OPTIONS: /v1/status - request_headers: - Origin: http://notallowed.com - Access-Control-Request-Method: GET - response_forbidden_headers: - - Access-Control-Allow-Origin - - Access-Control-Allow-Methods - - - name: get CORS headers for allowed - OPTIONS: /v1/status - request_headers: - Origin: http://foobar.com - Access-Control-Request-Method: GET - response_headers: - Access-Control-Allow-Origin: http://foobar.com - Access-Control-Allow-Methods: GET diff --git a/gnocchi/tests/functional/gabbits/healthcheck.yaml b/gnocchi/tests/functional/gabbits/healthcheck.yaml deleted file mode 100644 index a2cf6fd1c..000000000 --- a/gnocchi/tests/functional/gabbits/healthcheck.yaml +++ /dev/null @@ -1,7 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: healthcheck - GET: /healthcheck - status: 200 diff --git a/gnocchi/tests/functional/gabbits/history.yaml b/gnocchi/tests/functional/gabbits/history.yaml deleted file mode 100644 index 0bdc47fdf..000000000 --- a/gnocchi/tests/functional/gabbits/history.yaml +++ /dev/null @@ -1,160 +0,0 @@ -# -# Test the resource history related API -# - -fixtures: - - ConfigFixture - -tests: - - name: create archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: low - definition: - - granularity: 1 hour - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/low - -# Try creating a new generic resource - - - name: post generic resource - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-d8a5-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: /^application\/json/ - response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - $.user_id: 0fbb231484614b1a80131fc22f6afc9c - -# Update it twice - - name: patch resource user_id - PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - request_headers: - 
x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - status: 200 - response_json_paths: - user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - - name: patch resource project_id - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - project_id: fe20a931-1012-4cc6-addc-39556ec60907 - metrics: - mymetric: - archive_policy_name: low - status: 200 - response_json_paths: - user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - project_id: fe20a931-1012-4cc6-addc-39556ec60907 - -# List resources - - - name: list all resources without history - GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $[0].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[0].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - - - name: list all resources with history - GET: $LAST_URL - request_headers: - accept: application/json; details=True; history=True - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.`len`: 3 - $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c - $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - - - name: patch resource metrics - PATCH: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - foo: - archive_policy_name: low - status: 200 - - - name: list all resources with history no change after metrics update - GET: /v1/resource/generic - request_headers: - accept: application/json; details=True; history=True - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.`len`: 3 - $[0].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c - $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 - - - name: create new metrics - POST: /v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - foobar: - archive_policy_name: low - status: 204 - - - name: list all resources with history no change after metrics creation - GET: /v1/resource/generic - request_headers: - accept: application/json; details=True; history=True - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.`len`: 3 - $[0].id: 
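The assertions above pin down the history rule: a PATCH that changes resource attributes closes the current revision and opens a new one (hence three revisions after two attribute updates), while adding or patching metrics alone leaves the revision count untouched. A toy model of that behaviour (hypothetical helper, not the indexer code):

import copy

class Resource:
    def __init__(self, **attrs):
        self.attrs = attrs
        self.metrics = {}
        self.revisions = [copy.deepcopy(attrs)]   # current revision included

    def patch(self, metrics=None, **attrs):
        if attrs:                                 # attribute change: new revision
            self.attrs.update(attrs)
            self.revisions.append(copy.deepcopy(self.attrs))
        if metrics is not None:                   # metric change: no new revision
            self.metrics.update(metrics)

r = Resource(user_id="u0", project_id="p0")
r.patch(user_id="u1")                                  # second revision
r.patch(project_id="p1", metrics={"mymetric": "low"})  # third revision
r.patch(metrics={"foo": "low"})                        # still three, as asserted above
assert len(r.revisions) == 3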
f93450f2-d8a5-4d67-9985-02511241e7d1 - $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c - $[0].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[1].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[1].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[1].project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $[2].id: f93450f2-d8a5-4d67-9985-02511241e7d1 - $[2].user_id: f53c58a4-fdea-4c09-aac4-02135900be67 - $[2].project_id: fe20a931-1012-4cc6-addc-39556ec60907 diff --git a/gnocchi/tests/functional/gabbits/metric-granularity.yaml b/gnocchi/tests/functional/gabbits/metric-granularity.yaml deleted file mode 100644 index 47a5efe36..000000000 --- a/gnocchi/tests/functional/gabbits/metric-granularity.yaml +++ /dev/null @@ -1,60 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: cookies - definition: - - granularity: 1 second - status: 201 - - - name: create valid metric - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: cookies - status: 201 - - - name: push measurements to metric - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: get metric list - GET: /v1/metric - status: 200 - - - name: get measurements invalid granularity - GET: /v1/metric/$RESPONSE['$[0].id']/measures?granularity=42 - status: 404 - response_strings: - - Granularity '42.0' for metric $RESPONSE['$[0].id'] does not exist - - - name: get measurements granularity - GET: /v1/metric/$HISTORY['get metric list'].$RESPONSE['$[0].id']/measures?granularity=1 - status: 200 - poll: - count: 50 - delay: .1 - response_json_paths: - $: - - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] diff --git a/gnocchi/tests/functional/gabbits/metric-list.yaml b/gnocchi/tests/functional/gabbits/metric-list.yaml deleted file mode 100644 index 59f58b96d..000000000 --- a/gnocchi/tests/functional/gabbits/metric-list.yaml +++ /dev/null @@ -1,142 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - x-roles: admin - -tests: - - name: create archive policy 1 - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: first_archive - definition: - - granularity: 1 second - status: 201 - - - name: create archive policy 2 - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: second_archive - definition: - - granularity: 1 second - status: 201 - - - name: create metric 1 - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "disk.io.rate" - unit: "B/s" - archive_policy_name: first_archive - status: 201 - response_json_paths: - $.archive_policy_name: first_archive - $.name: disk.io.rate - $.unit: B/s - - - name: create metric 2 - POST: /v1/metric - request_headers: - content-type: application/json - x-user-id: 4fff6179c2fc414dbedfc8cc82d6ada7 - x-project-id: f3ca498a61c84422b953133adb71cff8 - data: - name: "disk.io.rate" - unit: "B/s" - 
archive_policy_name: first_archive - status: 201 - response_json_paths: - $.archive_policy_name: first_archive - $.name: disk.io.rate - $.unit: B/s - - - name: create metric 3 - POST: /v1/metric - request_headers: - content-type: application/json - x-user-id: faf30294217c4e1a91387d9c8f1fb1fb - x-project-id: f3ca498a61c84422b953133adb71cff8 - data: - name: "cpu_util" - unit: "%" - archive_policy_name: first_archive - status: 201 - response_json_paths: - $.archive_policy_name: first_archive - $.name: cpu_util - $.unit: "%" - - - name: create metric 4 - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "cpu" - unit: "ns" - archive_policy_name: second_archive - status: 201 - response_json_paths: - $.archive_policy_name: second_archive - $.name: cpu - $.unit: ns - - - name: list metrics - GET: /v1/metric - response_json_paths: - $.`len`: 4 - - - name: list metrics by id - GET: /v1/metric?id=$HISTORY['create metric 1'].$RESPONSE['id'] - response_json_paths: - $.`len`: 1 - $[0].name: disk.io.rate - $[0].archive_policy.name: first_archive - - - name: list metrics by name - GET: /v1/metric?name=disk.io.rate - response_json_paths: - $.`len`: 2 - $[0].name: disk.io.rate - $[1].name: disk.io.rate - $[0].archive_policy.name: first_archive - $[1].archive_policy.name: first_archive - - - name: list metrics by unit - GET: /v1/metric?unit=ns - response_json_paths: - $.`len`: 1 - $[0].name: cpu - $[0].archive_policy.name: second_archive - - - name: list metrics by archive_policy - GET: /v1/metric?archive_policy_name=first_archive&sort=name:desc - response_json_paths: - $.`len`: 3 - $[0].name: disk.io.rate - $[1].name: disk.io.rate - $[2].name: cpu_util - $[0].archive_policy.name: first_archive - $[1].archive_policy.name: first_archive - $[2].archive_policy.name: first_archive - - - name: list metrics by user_id - GET: /v1/metric?user_id=faf30294217c4e1a91387d9c8f1fb1fb - response_json_paths: - $.`len`: 1 - - - name: list metrics by project_id - GET: /v1/metric?project_id=f3ca498a61c84422b953133adb71cff8 - response_json_paths: - $.`len`: 2 diff --git a/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml b/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml deleted file mode 100644 index f45228809..000000000 --- a/gnocchi/tests/functional/gabbits/metric-timestamp-format.yaml +++ /dev/null @@ -1,60 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: cookies - definition: - - granularity: 1 second - status: 201 - - - name: create metric - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: cookies - status: 201 - response_json_paths: - $.archive_policy_name: cookies - - - name: push measurements to metric with relative timestamp - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "-5 minutes" - value: 43.1 - status: 202 - - - name: create metric 2 - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: cookies - status: 201 - response_json_paths: - $.archive_policy_name: cookies - - - name: push measurements to metric with mixed timestamps - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - 
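Measure timestamps in these tests arrive in three shapes: ISO 8601 strings, numeric epochs, and relative offsets such as "-5 minutes"; the mixed-timestamp batch just below expects a 400 when epoch and relative values are combined. A hedged sketch of parsing the three accepted forms (illustrative only, not gnocchi's parser):

from datetime import datetime, timedelta, timezone

def parse_timestamp(value, now=None):
    now = now or datetime.now(timezone.utc)
    if isinstance(value, (int, float)):                  # 1478012832
        return datetime.fromtimestamp(value, timezone.utc)
    if value.startswith("-"):                            # "-5 minutes"
        amount, unit = value[1:].split()
        unit = unit if unit.endswith("s") else unit + "s"
        return now - timedelta(**{unit: float(amount)})
    return datetime.fromisoformat(value)                 # "2015-03-06T14:33:57"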
content-type: application/json - data: - - timestamp: 1478012832 - value: 43.1 - - timestamp: "-5 minutes" - value: 43.1 - status: 400 diff --git a/gnocchi/tests/functional/gabbits/metric.yaml b/gnocchi/tests/functional/gabbits/metric.yaml deleted file mode 100644 index e987c81ce..000000000 --- a/gnocchi/tests/functional/gabbits/metric.yaml +++ /dev/null @@ -1,331 +0,0 @@ -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: wrong metric - desc: https://bugs.launchpad.net/gnocchi/+bug/1429949 - GET: /v1/metric/foobar - status: 404 - - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: cookies - definition: - - granularity: 1 second - status: 201 - - - name: create archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule - metric_pattern: "disk.io.*" - archive_policy_name: cookies - status: 201 - - - name: create alt archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: cream - definition: - - granularity: 5 second - status: 201 - - - name: create alt archive policy rule - desc: extra rule that won't be matched - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_ignore_rule - metric_pattern: "disk.*" - archive_policy_name: cream - status: 201 - - - name: get metric empty - GET: /v1/metric - status: 200 - response_strings: - - "[]" - - - name: get metric list with nonexistent sort key - GET: /v1/metric?sort=nonexistent_key:asc - status: 400 - response_strings: - - "Sort key supplied is invalid: nonexistent_key" - - - name: create metric with name and unit - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "disk.io.rate" - unit: "B/s" - status: 201 - response_json_paths: - $.archive_policy_name: cookies - $.name: disk.io.rate - $.unit: B/s - - - name: create metric with invalid name - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "disk/io/rate" - unit: "B/s" - status: 400 - response_strings: - - "'/' is not supported in metric name" - - - name: create metric with name and over length unit - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "disk.io.rate" - unit: "over_length_unit_over_length_unit" - status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: length of value must be at most 31 for dictionary value @ data[" - - "'unit']" - - - name: create metric with name no rule - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "volume.io.rate" - status: 400 - response_strings: - - No archive policy name specified and no archive policy rule found matching the metric name volume.io.rate - - - name: create metric bad archive policy - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: bad-cookie - status: 400 - response_strings: - - Archive policy bad-cookie does not exist - - - name: create metric bad content-type - POST: /v1/metric - request_headers: - content-type: plain/text - data: '{"archive_policy_name": "cookies"}' - status: 415 - - - name: create valid metric - POST: /v1/metric - request_headers: - 
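Two rules now exist for these metric tests: test_rule (disk.io.*) and test_ignore_rule (disk.*). The later assertions depend on "disk.io.rate" picking up the cookies policy, i.e. on the most specific pattern winning; one plausible reading of that selection, with fnmatch-style matching and an ordering assumed rather than taken from this patch:

import fnmatch

RULES = [("disk.io.*", "cookies"), ("disk.*", "cream")]

def policy_for(metric_name):
    # Most specific (longest) pattern first, so disk.io.* beats disk.*.
    for pattern, policy in sorted(RULES, key=lambda r: len(r[0]), reverse=True):
        if fnmatch.fnmatch(metric_name, pattern):
            return policy
    raise LookupError("no archive policy rule found matching %s" % metric_name)

assert policy_for("disk.io.rate") == "cookies"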
content-type: application/json - data: - archive_policy_name: cookies - status: 201 - response_json_paths: - $.archive_policy_name: cookies - - - name: get valid metric id - GET: /v1/metric/$RESPONSE['$.id'] - status: 200 - response_json_paths: - $.archive_policy.name: cookies - - - name: push measurements to metric before epoch - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "1915-03-06T14:33:57" - value: 43.1 - status: 400 - response_strings: - - Timestamp must be after Epoch - - - name: list valid metrics - GET: /v1/metric - response_json_paths: - $[0].archive_policy.name: cookies - - - name: push measurements to metric with bad timestamp - POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "1915-100-06T14:33:57" - value: 43.1 - status: 400 - - - name: push measurements to metric epoch format - POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: 1425652437.0 - value: 43.1 - status: 202 - - - name: push measurements to metric - POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: get measurements by start - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=2015-03-06T14:34 - response_json_paths: - $: - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - - name: get measurements by start with epoch - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&start=1425652440 - response_json_paths: - $: - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - - name: get measurements from metric - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true - response_json_paths: - $: - - ["2015-03-06T14:33:57+00:00", 1.0, 43.1] - - ["2015-03-06T14:34:12+00:00", 1.0, 12.0] - - - name: push measurements to metric again - POST: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:34:15" - value: 16 - - timestamp: "2015-03-06T14:35:12" - value: 9 - - timestamp: "2015-03-06T14:35:15" - value: 11 - status: 202 - - - name: get measurements from metric and resample - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?refresh=true&resample=60&granularity=1 - response_json_paths: - $: - - ["2015-03-06T14:33:00+00:00", 60.0, 43.1] - - ["2015-03-06T14:34:00+00:00", 60.0, 14.0] - - ["2015-03-06T14:35:00+00:00", 60.0, 10.0] - - - name: get measurements from metric and resample no granularity - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=60 - status: 400 - response_strings: - - A granularity must be specified to resample - - - name: get measurements from metric and bad resample - GET: /v1/metric/$HISTORY['list valid metrics'].$RESPONSE['$[0].id']/measures?resample=abc - status: 400 - - - name: create valid metric two - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: cookies - status: 201 - response_json_paths: - $.archive_policy_name: cookies - - - name: push invalid measurements to metric - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - 
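The resample=60 expectations above are plain bucket averaging of the 1-second points: 14:33:57 stands alone (43.1), 14:34:12 and 14:34:15 average to 14.0, and 14:35:12 and 14:35:15 to 10.0. The arithmetic, as a runnable check:

from collections import defaultdict

def resample(points, span):
    # Regroup (epoch, value) samples into wider buckets and average them.
    buckets = defaultdict(list)
    for ts, value in points:
        buckets[ts - ts % span].append(value)
    return {ts: sum(vs) / len(vs) for ts, vs in sorted(buckets.items())}

points = [(1425652437, 43.1), (1425652452, 12), (1425652455, 16),
          (1425652512, 9), (1425652515, 11)]            # the measures above
assert resample(points, 60) == {1425652380: 43.1,       # 14:33:00
                                1425652440: 14.0,       # 14:34:00
                                1425652500: 10.0}       # 14:35:00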
content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 12 - - timestamp: "2015-03-06T14:34:12" - value: "foobar" - status: 400 - - - name: create valid metric three - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: cookies - status: 201 - response_json_paths: - $.archive_policy_name: cookies - - - name: push invalid measurements to metric bis - POST: /v1/metric/$RESPONSE['$.id']/measures - request_headers: - content-type: application/json - data: 1 - status: 400 - - - name: add measure unknown metric - POST: /v1/metric/fake/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - status: 404 - - - name: get metric list for authenticated user - request_headers: - x-user-id: foo - x-project-id: bar - GET: /v1/metric - - - name: get measures unknown metric - GET: /v1/metric/fake/measures - status: 404 - - - name: get metric list for aggregates - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: cookies - - - name: get measure unknown aggregates - GET: /v1/aggregation/metric?metric=$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id']&aggregation=last - status: 404 - response_strings: - - Aggregation method 'last' for metric $RESPONSE['$[0].id'] does not exist - - - name: aggregate measure unknown metric - GET: /v1/aggregation/metric?metric=cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 - status: 404 - response_strings: - - Metric cee6ef1f-52cc-4a16-bbb5-648aedfd1c37 does not exist - - - name: delete metric - DELETE: /v1/metric/$HISTORY['get metric list for aggregates'].$RESPONSE['$[0].id'] - status: 204 - - - name: delete metric again - DELETE: $LAST_URL - status: 404 - - - name: delete non existent metric - DELETE: /v1/metric/foo - status: 404 diff --git a/gnocchi/tests/functional/gabbits/pagination.yaml b/gnocchi/tests/functional/gabbits/pagination.yaml deleted file mode 100644 index ef85a379d..000000000 --- a/gnocchi/tests/functional/gabbits/pagination.yaml +++ /dev/null @@ -1,506 +0,0 @@ -# -# Test the pagination API -# - -fixtures: - - ConfigFixture - -tests: - -# -# Create resources for this scenario -# - - name: post resource 1 - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 57a9e836-87b8-4a21-9e30-18a474b98fef - started_at: "2014-01-01T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 2 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 4facbf7e-a900-406d-a828-82393f7006b3 - started_at: "2014-01-02T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 3 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 36775172-ebc9-4060-9870-a649361bc3ab - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 4 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id:
f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 28593168-52bb-43b5-a6db-fc2343aac02a - started_at: "2014-01-04T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 5 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - started_at: "2014-01-05T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - -# -# Basic resource limit/ordering tests -# - - name: list first two items default order - GET: /v1/resource/generic?limit=2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].id: 57a9e836-87b8-4a21-9e30-18a474b98fef - $[1].id: 4facbf7e-a900-406d-a828-82393f7006b3 - - - name: list next three items default order - GET: /v1/resource/generic?limit=4&marker=4facbf7e-a900-406d-a828-82393f7006b3 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 3 - $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab - $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a - $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - - - name: list first two items order by id without direction - GET: /v1/resource/generic?limit=2&sort=id - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - response_json_paths: - $.`len`: 2 - $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a - - - name: list first two items order by id - GET: /v1/resource/generic?limit=2&sort=id:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - $[1].id: 28593168-52bb-43b5-a6db-fc2343aac02a - - - name: list next three items order by id - GET: /v1/resource/generic?limit=4&sort=id:asc&marker=28593168-52bb-43b5-a6db-fc2343aac02a - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 3 - $[0].id: 36775172-ebc9-4060-9870-a649361bc3ab - $[1].id: 4facbf7e-a900-406d-a828-82393f7006b3 - $[2].id: 57a9e836-87b8-4a21-9e30-18a474b98fef - - - name: search for some resources with limit, order and marker - POST: /v1/search/resource/generic?limit=2&sort=id:asc&marker=36775172-ebc9-4060-9870-a649361bc3ab - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - "or": [ - {"=": {"id": 36775172-ebc9-4060-9870-a649361bc3ab}}, - {"=": {"id": 4facbf7e-a900-406d-a828-82393f7006b3}}, - {"=": {"id": 57a9e836-87b8-4a21-9e30-18a474b98fef}}, - ] - response_json_paths: - $.`len`: 2 - $[0].id: 4facbf7e-a900-406d-a828-82393f7006b3 - $[1].id: 57a9e836-87b8-4a21-9e30-18a474b98fef - -# -# Invalid resource limit/ordering -# - - name: invalid sort_key - GET: /v1/resource/generic?sort=invalid:asc - request_headers: - x-user-id:
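These listings use keyset pagination: limit caps the page size and marker names the last id of the previous page, with results resuming strictly after it; an unknown marker is a client error, as the invalid-marker test below asserts. A sketch of that contract (illustrative, not the indexer's SQL):

def page(rows, key, limit, marker=None):
    ordered = sorted(rows, key=key)
    if marker is not None:
        ids = [row["id"] for row in ordered]
        if marker not in ids:
            raise ValueError("Invalid marker: %s" % marker)  # -> HTTP 400
        ordered = ordered[ids.index(marker) + 1:]            # marker row excluded
    return ordered[:limit]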
0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: invalid sort_dir - GET: /v1/resource/generic?sort=id:invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: invalid marker - GET: /v1/resource/generic?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: invalid negative limit - GET: /v1/resource/generic?limit=-2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: invalid limit - GET: /v1/resource/generic?limit=invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - -# -# Default limit -# - - - name: post resource 6 - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 465f87b2-61f7-4118-adec-1d96a78af401 - started_at: "2014-01-02T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 7 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f - started_at: "2014-01-28T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post resource 8 - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: d787aa85-5743-4443-84f9-204270bc141a - started_at: "2014-01-31T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: default limit - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 7 - $[-1].id: 9b6af245-57df-4ed6-a8c0-f64b77d8867f - - - - name: update resource 5 - PATCH: /v1/resource/generic/1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - ended_at: "2014-01-30T02:02:02.000000" - - - name: update resource 5 again - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - ended_at: "2014-01-31T02:02:02.000000" - - - name: default limit with history and multiple sort key - GET: /v1/resource/generic?history=true&sort=id:asc&sort=ended_at:desc-nullslast - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 7 - $[0].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - $[0].ended_at: "2014-01-31T02:02:02+00:00" - $[1].id: 
1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - $[1].ended_at: "2014-01-30T02:02:02+00:00" - $[2].id: 1e3d5702-2cbf-46e0-ba13-0ddaa3c71150 - $[2].ended_at: null - -# -# Create metrics -# - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: dummy_policy - definition: - - granularity: 1 second - status: 201 - - - name: create metric with name1 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - name: "dummy1" - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name2 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - name: "dummy2" - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name3 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - name: "dummy3" - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name4 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - name: "dummy4" - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name5 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - name: "dummy5" - archive_policy_name: dummy_policy - status: 201 - - - name: list all default order - GET: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - - - name: list first two metrics default order - GET: /v1/metric?limit=2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].name: $RESPONSE['$[0].name'] - $[1].name: $RESPONSE['$[1].name'] - - - name: list next three metrics default order - GET: /v1/metric?limit=4&marker=$HISTORY['list all default order'].$RESPONSE['$[1].id'] - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 3 - $[0].name: $HISTORY['list all default order'].$RESPONSE['$[2].name'] - $[1].name: $HISTORY['list all default order'].$RESPONSE['$[3].name'] - $[2].name: $HISTORY['list all default order'].$RESPONSE['$[4].name'] - - - name: list first two metrics order by user without direction - GET: /v1/metric?limit=2&sort=name - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - response_json_paths: - $.`len`: 2 - $[0].name: dummy1 - $[1].name: dummy2 - - - name: list first two metrics order by user - GET: /v1/metric?limit=2&sort=name:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].name: dummy1 - $[1].name: dummy2 - - - name: list next third metrics 
order by user - GET: /v1/metric?limit=4&sort=name:asc&marker=$RESPONSE['$[1].id'] - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 3 - $[0].name: dummy3 - $[1].name: dummy4 - $[2].name: dummy5 - -# -# Default metric limit -# - - - name: create metric with name6 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name7 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - archive_policy_name: dummy_policy - status: 201 - - - name: create metric with name8 - POST: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - archive_policy_name: dummy_policy - status: 201 - - - name: default metric limit - GET: /v1/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 7 - -# -# Invalid metrics limit/ordering -# - - - name: metric invalid sort_key - GET: /v1/metric?sort=invalid:asc - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: metric invalid sort_dir - GET: /v1/metric?sort=id:invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: metric invalid marker - GET: /v1/metric?marker=d44b3f4c-27bc-4ace-b81c-2a8e60026874 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: metric invalid negative limit - GET: /v1/metric?limit=-2 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: metric invalid limit - GET: /v1/metric?limit=invalid - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 diff --git a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml b/gnocchi/tests/functional/gabbits/resource-aggregation.yaml deleted file mode 100644 index c03384761..000000000 --- a/gnocchi/tests/functional/gabbits/resource-aggregation.yaml +++ /dev/null @@ -1,169 +0,0 @@ -fixtures: - - ConfigFixture - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: low - definition: - - granularity: 1 second - - granularity: 300 seconds - status: 201 - - - name: create resource 1 - POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - id: 4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - metrics: - cpu.util: - 
archive_policy_name: low - status: 201 - - - name: post cpuutil measures 1 - POST: /v1/resource/generic/4ed9c196-4c9f-4ba8-a5be-c9a71a82aac4/metric/cpu.util/measures - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: create resource 2 - POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - id: 1447CD7E-48A6-4C50-A991-6677CC0D00E6 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - metrics: - cpu.util: - archive_policy_name: low - status: 201 - - - name: post cpuutil measures 2 - POST: /v1/resource/generic/1447CD7E-48A6-4C50-A991-6677CC0D00E6/metric/cpu.util/measures - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 23 - - timestamp: "2015-03-06T14:34:12" - value: 8 - status: 202 - - - name: create resource 3 - POST: /v1/resource/generic - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - id: 33333BC5-5948-4F29-B7DF-7DE607660452 - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 - metrics: - cpu.util: - archive_policy_name: low - status: 201 - - - name: post cpuutil measures 3 - POST: /v1/resource/generic/33333BC5-5948-4F29-B7DF-7DE607660452/metric/cpu.util/measures - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 230 - - timestamp: "2015-03-06T14:34:12" - value: 45.41 - status: 202 - - - name: aggregate metric with groupby on project_id - POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - =: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - measures: - - ["2015-03-06T14:30:00+00:00", 300.0, 21.525] - - ["2015-03-06T14:33:57+00:00", 1.0, 33.05] - - ["2015-03-06T14:34:12+00:00", 1.0, 10.0] - group: - project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - - measures: - - ["2015-03-06T14:30:00+00:00", 300.0, 137.70499999999998] - - ["2015-03-06T14:33:57+00:00", 1.0, 230.0] - - ["2015-03-06T14:34:12+00:00", 1.0, 45.41] - group: - project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 - - - name: aggregate metric with groupby on project_id and invalid group - POST: /v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=thisisdumb - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - =: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - status: 400 - response_strings: - - Invalid groupby attribute - - - name: aggregate metric with groupby on project_id and user_id - POST: 
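The expected series in these groupby aggregations are point-wise means across every cpu.util metric of a group: for project c7f32f1f-c5ef-427a-8ecd-915b219c66e8, (43.1 + 23) / 2 = 33.05 at 14:33:57 and (43.1 + 23 + 12 + 8) / 4 = 21.525 for the 300-second bucket. A runnable check of that arithmetic:

def mean(values):
    return sum(values) / len(values)

assert round(mean([43.1, 23]), 3) == 33.05          # 1s point at 14:33:57
assert round(mean([12, 8]), 3) == 10.0              # 1s point at 14:34:12
assert round(mean([43.1, 23, 12, 8]), 3) == 21.525  # 300s bucket at 14:30:00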
/v1/aggregation/resource/generic/metric/cpu.util?groupby=project_id&groupby=user_id - request_headers: - x-user-id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - x-project-id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - content-type: application/json - data: - =: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - poll: - count: 10 - delay: 1 - response_json_paths: - $: - - measures: - - ['2015-03-06T14:30:00+00:00', 300.0, 21.525] - - ['2015-03-06T14:33:57+00:00', 1.0, 33.05] - - ['2015-03-06T14:34:12+00:00', 1.0, 10.0] - group: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - project_id: c7f32f1f-c5ef-427a-8ecd-915b219c66e8 - - measures: - - ['2015-03-06T14:30:00+00:00', 300.0, 137.70499999999998] - - ['2015-03-06T14:33:57+00:00', 1.0, 230.0] - - ['2015-03-06T14:34:12+00:00', 1.0, 45.41] - group: - user_id: 6c865dd0-7945-4e08-8b27-d0d7f1c2b667 - project_id: ee4cfc41-1cdc-4d2f-9a08-f94111d80171 diff --git a/gnocchi/tests/functional/gabbits/resource-type.yaml b/gnocchi/tests/functional/gabbits/resource-type.yaml deleted file mode 100644 index fca3aaa32..000000000 --- a/gnocchi/tests/functional/gabbits/resource-type.yaml +++ /dev/null @@ -1,772 +0,0 @@ -# -# Test the resource type API to achieve coverage of just the -# ResourceTypesController and ResourceTypeController class code. -# - -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - - name: list resource type - desc: only legacy resource types are present - GET: /v1/resource_type - response_json_paths: - $.`len`: 1 - -# Some bad cases - - - name: post resource type as non-admin - POST: $LAST_URL - data: - name: my_custom_resource - request_headers: - content-type: application/json - status: 403 - - - name: post resource type with existing name - POST: /v1/resource_type - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - attributes: - project_id: - type: string - status: 400 - - - name: post resource type bad string - POST: $LAST_URL - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - attributes: - foo: - type: string - max_length: 32 - min_length: 5 - noexist: foo - status: 400 - response_strings: - # NOTE(sileht): We would prefer to have a better message but voluptuous seems a bit lost when - # an Any have many dict with the same key, here "type" - # - "Invalid input: extra keys not allowed @ data[u'attributes'][u'foo'][u'noexist']" - # - "Invalid input: not a valid value for dictionary value @ data[u'attributes'][u'foo'][u'type']" - - "Invalid input:" - - - name: post resource type bad min_length value - POST: $LAST_URL - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - attributes: - name: - type: string - required: true - max_length: 2 - min_length: 5 - status: 400 - - - name: post resource type bad min value - POST: $LAST_URL - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - attributes: - int: - type: number - required: false - max: 3 - min: 8 - status: 400 - -# Create a type - - - name: post resource type - POST: $LAST_URL - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - attributes: - name: - type: string - required: true - max_length: 5 - min_length: 2 - foobar: - type: string - required: false - uuid: - type: uuid - int: - type: number - required: false - 
min: -2 - max: 3 - intnomin: - type: number - required: false - max: 3 - float: - type: number - required: false - min: -2.3 - bool: - type: bool - required: false - status: 201 - response_json_paths: - $.name: my_custom_resource - $.state: active - $.attributes: - name: - type: string - required: True - max_length: 5 - min_length: 2 - foobar: - type: string - required: False - max_length: 255 - min_length: 0 - uuid: - type: uuid - required: True - int: - type: number - required: False - min: -2 - max: 3 - intnomin: - type: number - required: False - min: - max: 3 - float: - type: number - required: false - min: -2.3 - max: - bool: - type: bool - required: false - - response_headers: - location: $SCHEME://$NETLOC/v1/resource_type/my_custom_resource - -# Control the created type - - - name: relist resource types - desc: we have a resource type now - GET: $LAST_URL - response_json_paths: - $.`len`: 2 - $.[1].name: my_custom_resource - $.[1].state: active - - - name: get the custom resource type - GET: /v1/resource_type/my_custom_resource - response_json_paths: - $.name: my_custom_resource - $.state: active - $.attributes: - name: - type: string - required: True - min_length: 2 - max_length: 5 - foobar: - type: string - required: False - min_length: 0 - max_length: 255 - uuid: - type: uuid - required: True - int: - type: number - required: False - min: -2 - max: 3 - intnomin: - type: number - required: False - min: - max: 3 - float: - type: number - required: false - min: -2.3 - max: - bool: - type: bool - required: false - -# Some bad case case on the type - - - name: delete as non-admin - DELETE: $LAST_URL - status: 403 - -# Bad resources for this type - - - name: post invalid resource - POST: /v1/resource/my_custom_resource - request_headers: - content-type: application/json - data: - id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - name: toolong!!! - foobar: what - uuid: 07eb339e-23c0-4be2-be43-cd8247afae3b - status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: length of value must be at most 5 for dictionary value @ data[" - - "'name']" - - - name: post invalid resource uuid - POST: $LAST_URL - request_headers: - content-type: application/json - data: - id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - name: too - foobar: what - uuid: really! 
- status: 400 - response_strings: - # split to not match the u' in py2 - - "Invalid input: not a valid value for dictionary value @ data[" - - "'uuid']" - -# Good resources for this type - - - name: post custom resource - POST: $LAST_URL - request_headers: - content-type: application/json - data: - id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - name: bar - foobar: what - uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - int: 1 - status: 201 - response_json_paths: - $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $.name: bar - $.foobar: what - - - name: patch custom resource - PATCH: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - request_headers: - content-type: application/json - data: - name: foo - status: 200 - response_json_paths: - $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $.name: foo - $.foobar: what - $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - $.int: 1 - - - name: get resource - GET: $LAST_URL - request_headers: - content-type: application/json - response_json_paths: - $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $.name: foo - $.foobar: what - $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - $.int: 1 - - - name: post resource with default - POST: /v1/resource/my_custom_resource - request_headers: - content-type: application/json - data: - id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 - name: foo - uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - status: 201 - response_json_paths: - $.id: c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 - $.name: foo - $.foobar: - $.uuid: e495ebad-be64-46c0-81d6-b079beb48df9 - $.int: - - - name: list resource history - GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast - request_headers: - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $[0].name: bar - $[0].foobar: what - $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $[1].name: foo - $[1].foobar: what - -# CRUD resource type attributes - - - name: post a new resource attribute - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: add - path: /attributes/newstuff - value: - type: string - required: False - min_length: 0 - max_length: 255 - - op: add - path: /attributes/newfilled - value: - type: string - required: False - min_length: 0 - max_length: 255 - options: - fill: "filled" - - op: add - path: /attributes/newbool - value: - type: bool - required: True - options: - fill: True - - op: add - path: /attributes/newint - value: - type: number - required: True - min: 0 - max: 255 - options: - fill: 15 - - op: add - path: /attributes/newstring - value: - type: string - required: True - min_length: 0 - max_length: 255 - options: - fill: "foobar" - - op: add - path: /attributes/newuuid - value: - type: uuid - required: True - options: - fill: "00000000-0000-0000-0000-000000000000" - - op: remove - path: /attributes/foobar - status: 200 - response_json_paths: - $.name: my_custom_resource - $.attributes: - name: - type: string - required: True - min_length: 2 - max_length: 5 - uuid: - type: uuid - required: True - int: - type: number - required: False - min: -2 - max: 3 - intnomin: - type: number - required: False - min: - max: 3 - float: - type: number - required: false - min: -2.3 - max: - bool: - type: bool - required: false - newstuff: - type: string - required: False - min_length: 0 - max_length: 255 - newfilled: - type: string - required: False - min_length: 0 - max_length: 255 - 
newstring: - type: string - required: True - min_length: 0 - max_length: 255 - newbool: - type: bool - required: True - newint: - type: number - required: True - min: 0 - max: 255 - newuuid: - type: uuid - required: True - - - name: post a new resource attribute with missing fill - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: add - path: /attributes/missing - value: - type: bool - required: True - options: {} - status: 400 - response_strings: - - "Invalid input: Option 'fill' of resource attribute missing is invalid: must not be empty if required=True" - - - name: post a new resource attribute with incorrect fill - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: add - path: /attributes/incorrect - value: - type: number - required: True - options: - fill: "a-string" - status: 400 - response_strings: - - "Invalid input: Option 'fill' of resource attribute incorrect is invalid: expected Real" - - - name: get the new custom resource type - GET: /v1/resource_type/my_custom_resource - response_json_paths: - $.name: my_custom_resource - $.attributes: - name: - type: string - required: True - min_length: 2 - max_length: 5 - uuid: - type: uuid - required: True - int: - type: number - required: False - min: -2 - max: 3 - intnomin: - type: number - required: False - min: - max: 3 - float: - type: number - required: false - min: -2.3 - max: - bool: - type: bool - required: false - newstuff: - type: string - required: False - min_length: 0 - max_length: 255 - newfilled: - type: string - required: False - min_length: 0 - max_length: 255 - newstring: - type: string - required: True - min_length: 0 - max_length: 255 - newbool: - type: bool - required: True - newint: - type: number - required: True - min: 0 - max: 255 - newuuid: - type: uuid - required: True - - - name: control new attributes of existing resource - GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - request_headers: - content-type: application/json - status: 200 - response_json_paths: - $.id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $.name: foo - $.newstuff: null - $.newfilled: "filled" - $.newbool: true - $.newint: 15 - $.newstring: foobar - $.newuuid: "00000000-0000-0000-0000-000000000000" - - - name: control new attributes of existing resource history - GET: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747/history?sort=revision_end:asc-nullslast - request_headers: - content-type: application/json - response_json_paths: - $.`len`: 2 - $[0].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $[0].name: bar - $[0].newstuff: null - $[0].newfilled: "filled" - $[0].newbool: true - $[0].newint: 15 - $[0].newstring: foobar - $[0].newuuid: "00000000-0000-0000-0000-000000000000" - $[1].id: d11edfca-4393-4fda-b94d-b05a3a1b3747 - $[1].name: foo - $[1].newstuff: null - $[1].newfilled: "filled" - $[1].newbool: true - $[1].newint: 15 - $[1].newstring: foobar - $[1].newuuid: "00000000-0000-0000-0000-000000000000" - -# Invalid patch - - - name: add/delete the same resource attribute - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: add - path: /attributes/what - value: - type: string - required: False - min_length: 0 - max_length: 255 - - op: remove - path: /attributes/what - status: 200 - response_json_paths: - $.name: my_custom_resource - 
$.attributes: - name: - type: string - required: True - min_length: 2 - max_length: 5 - uuid: - type: uuid - required: True - int: - type: number - required: False - min: -2 - max: 3 - intnomin: - type: number - required: False - min: - max: 3 - float: - type: number - required: false - min: -2.3 - max: - bool: - type: bool - required: false - newstuff: - type: string - required: False - min_length: 0 - max_length: 255 - newfilled: - type: string - required: False - min_length: 0 - max_length: 255 - newstring: - type: string - required: True - min_length: 0 - max_length: 255 - newbool: - type: bool - required: True - newint: - type: number - required: True - min: 0 - max: 255 - newuuid: - type: uuid - required: True - - - name: delete/add the same resource attribute - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: remove - path: /attributes/what - - op: add - path: /attributes/what - value: - type: string - required: False - min_length: 0 - max_length: 255 - status: 400 - response_strings: - - "can't remove non-existent object 'what'" - - - name: patch a resource attribute replace - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: replace - path: /attributes/newstuff - value: - type: string - required: False - min_length: 0 - max_length: 255 - status: 400 - response_strings: - - "Invalid input: not a valid value for dictionary value @ data[0][" - - "'op']" - - - name: patch a resource attribute type not exist - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: add - path: /attributes/newstuff - value: - type: notexist - required: False - min_length: 0 - max_length: 255 - status: 400 - - - name: patch a resource attribute type unknown - PATCH: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - content-type: application/json-patch+json - data: - - op: remove - path: /attributes/unknown - status: 400 - response_strings: - - "can't remove non-existent object 'unknown'" - -# Ensure we can't delete the type - - - name: delete in use resource_type - DELETE: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - status: 400 - response_strings: - - Resource type my_custom_resource is still in use - -# Delete associated resources - - - name: delete the resource - DELETE: /v1/resource/my_custom_resource/d11edfca-4393-4fda-b94d-b05a3a1b3747 - request_headers: - x-roles: admin - status: 204 - - - name: delete the second resource - DELETE: /v1/resource/my_custom_resource/c4110aec-6e5c-43fa-b8c5-ffdfbca3ce59 - request_headers: - x-roles: admin - status: 204 - -# Now we can deleted the type - - - name: delete the custom resource type - DELETE: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - status: 204 - - - name: delete non-existing custom resource type - DELETE: $LAST_URL - request_headers: - x-roles: admin - status: 404 - - - name: delete missing custom resource type utf8 - DELETE: /v1/resource_type/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - request_headers: - x-roles: admin - status: 404 - response_strings: - - Resource type ✔éñ☃ does not exist - -# Can we readd and delete the same resource type again - - - name: post resource type again - POST: /v1/resource_type - request_headers: - x-roles: admin - content-type: application/json - data: - name: my_custom_resource - 
status: 201 - - - name: delete the custom resource type again - DELETE: /v1/resource_type/my_custom_resource - request_headers: - x-roles: admin - status: 204 diff --git a/gnocchi/tests/functional/gabbits/resource.yaml b/gnocchi/tests/functional/gabbits/resource.yaml deleted file mode 100644 index a9d7e0407..000000000 --- a/gnocchi/tests/functional/gabbits/resource.yaml +++ /dev/null @@ -1,1106 +0,0 @@ -# -# Test the resource API to achieve coverage of just the -# ResourcesController and ResourceController class code. -# - -fixtures: - - ConfigFixture - -tests: - -# We will need an archive for use in later tests so we create it -# here. This could be done in a fixture but since the API allows it -# may as well use it. - - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: medium - definition: - - granularity: 1 second - status: 201 - - - name: create archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-roles: admin - data: - name: test_rule - metric_pattern: "disk.io.*" - archive_policy_name: medium - status: 201 - -# The top of the API is a bit confusing and presents some URIs which -# are not very useful. This isn't strictly a bug but does represent -# a measure of unfriendliness that we may wish to address. Thus the -# xfails. - - - name: root of all - GET: / - response_headers: - content-type: /application/json/ - response_json_paths: - $.versions[0].links[0].href: $SCHEME://$NETLOC/v1/ - - - name: root of v1 - GET: /v1 - redirects: true - response_json_paths: - $.version: "1.0" - $.links.`len`: 11 - $.links[0].href: $SCHEME://$NETLOC/v1 - $.links[7].href: $SCHEME://$NETLOC/v1/resource - - - name: root of resource - GET: /v1/resource - response_json_paths: - $.generic: $SCHEME://$NETLOC/v1/resource/generic - - - name: typo of resource - GET: /v1/resoue - status: 404 - - - name: typo of resource extra - GET: /v1/resource/foobar - status: 404 - -# Explore that GETting a list of resources demonstrates the expected -# behaviors notably with regard to content negotiation. - - - name: generic resource list - desc: there are no generic resources yet - GET: /v1/resource/generic - response_strings: - - "[]" - - - name: generic resource bad accept - desc: Expect 406 on bad accept type - GET: $LAST_URL - request_headers: - accept: text/plain - status: 406 - response_strings: - - 406 Not Acceptable - - - name: generic resource complex accept - desc: failover accept media type appropriately - GET: $LAST_URL - request_headers: - accept: text/plain, application/json; q=0.8 - response_strings: - - "[]" - -# Try creating a new generic resource in various ways. 
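Outside the gabbi harness, each of these creation tests is a single HTTP POST. A minimal sketch of the happy path with the Python ``requests`` library; the endpoint URL is an assumed local deployment, and the header values simply echo the ids used throughout these tests rather than anything the suite mandates:

    import uuid

    import requests

    BASE = "http://localhost:8041"  # assumed local Gnocchi endpoint
    HEADERS = {
        "x-user-id": "0fbb231484614b1a80131fc22f6afc9c",
        "x-project-id": "f3d41b770cc14f0bb94a1d5be9c0e3ea",
    }

    # POST a generic resource; requests sets content-type: application/json
    # automatically when json= is used.
    resp = requests.post(
        BASE + "/v1/resource/generic",
        headers=HEADERS,
        json={
            "id": str(uuid.uuid4()),
            "started_at": "2014-01-03T02:02:02.000000",
            "user_id": HEADERS["x-user-id"],
            "project_id": HEADERS["x-project-id"],
        },
    )
    assert resp.status_code == 201  # reposting the same id yields 409 below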
- - - name: generic resource - desc: there are no generic resources yet - GET: /v1/resource/generic - response_strings: - - "[]" - - - name: post resource no user-id - desc: https://bugs.launchpad.net/gnocchi/+bug/1424005 - POST: $LAST_URL - request_headers: - # Only provide one of these auth headers - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - content-type: application/json - data: - id: f93454f2-d8a5-4d67-9985-02511241e7f3 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post generic resource - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-d8a5-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/resource/generic/f93450f2-d8a5-4d67-9985-02511241e7d1 - content-type: /^application\/json/ - response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $.created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - $.user_id: 0fbb231484614b1a80131fc22f6afc9c - - - name: post same resource refuse - desc: We can only post one identified resource once - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-d8a5-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 409 - - - name: post generic resource bad content type - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: text/plain - data: '{"id": "f93450f2-d8a5-4d67-9985-02511241e7d1", "started_at": "2014-01-03T02:02:02.000000", "user_id": "0fbb231484614b1a80131fc22f6afc9c", "project_id": "f3d41b770cc14f0bb94a1d5be9c0e3ea"}' - status: 415 - -# Create a new generic resource, demonstrate that including no data -# gets a useful 400 response. - - - name: post generic resource no data - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - - - name: post generic with invalid metric name - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - "disk/iops": - archive_policy_name: medium - status: 400 - response_strings: - - "'/' is not supported in metric name" - - - name: post generic resource to modify - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 75C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - response_json_paths: - $.metrics: {} # empty dictionary - -# PATCH that generic resource to change its attributes and to -# associate metrics. If a metric does not exist there should be a -# graceful failure. 
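The same association can be driven directly over HTTP. A hedged sketch with ``requests``, reusing the resource id and the "medium" policy created earlier in this file; the endpoint is an assumption:

    import requests

    resp = requests.patch(
        "http://localhost:8041/v1/resource/generic/"
        "75c44741-cc60-4033-804e-2d3098c7d2e9",
        headers={
            "x-user-id": "0fbb231484614b1a80131fc22f6afc9c",
            "x-project-id": "f3d41b770cc14f0bb94a1d5be9c0e3ea",
        },
        json={"metrics": {"disk.iops": {"archive_policy_name": "medium"}}},
    )
    # 200: resource updated and the named metric created under it.
    # 400: unknown metric uuid or archive policy, as the failure
    #      tests below demonstrate.
    assert resp.status_code == 200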
- - name: patch generic resource - PATCH: $LOCATION - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - user_id: foobar - status: 200 - response_json_paths: - user_id: foobar - - - name: patch generic resource with same data - desc: Ensure no useless revision have been created - PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - user_id: foobar - status: 200 - response_json_paths: - user_id: foobar - revision_start: $RESPONSE['$.revision_start'] - - - name: patch generic resource with id - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: foobar - status: 400 - response_strings: - - "Invalid input: extra keys not allowed @ data[" - - "'id']" - - - name: patch generic with metrics - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - disk.iops: - archive_policy_name: medium - status: 200 - response_strings: - - '"disk.iops": ' - - - name: get generic history - desc: Ensure we can get the history - GET: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history?sort=revision_end:asc-nullslast - request_headers: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_json_paths: - $.`len`: 2 - $[1].revision_end: null - $[1].metrics.'disk.iops': $RESPONSE["metrics.'disk.iops'"] - - - name: patch generic bad metric association - PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - disk.iops: f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea - status: 400 - response_strings: - - Metric f3d41b77-0cc1-4f0b-b94a-1d5be9c0e3ea does not exist - - - name: patch generic with bad archive policy - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - disk.iops: - archive_policy_name: noexist - status: 400 - response_strings: - - Archive policy noexist does not exist - - - name: patch generic with no archive policy rule - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - disk.iops: {} - status: 400 - response_strings: - - No archive policy name specified and no archive policy rule found matching the metric name disk.iops - - - name: patch generic with archive policy rule - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - disk.io.rate: {} - status: 200 - - - name: get patched resource - desc: confirm the patched resource is properly patched - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - 
user_id: foobar - - - name: patch resource empty dict - desc: an empty dict in patch is an existence check - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: "{}" - status: 200 - data: - user_id: foobar - - - name: patch resource without change with metrics in response - desc: an empty dict in patch is an existence check - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: "{}" - status: 200 - response_json_paths: - $.metrics.'disk.io.rate': $RESPONSE["$.metrics.'disk.io.rate'"] - - - name: patch generic with invalid metric name - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - metrics: - "disk/iops": - archive_policy_name: medium - status: 400 - response_strings: - - "'/' is not supported in metric name" - -# Failure modes for history - - - name: post generic history - desc: should don't work - POST: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9/history - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 405 - - - name: delete generic history - desc: should don't work - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 405 - -# Failure modes for PATCHing a resource - - - name: patch resource no data - desc: providing no data is an error - PATCH: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - response_strings: - - "Unable to decode body:" - - - name: patch resource bad data - desc: providing data that is not a dict is an error - PATCH: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - data: - - Beer and pickles - response_strings: - - "Invalid input: expected a dictionary" - - - name: patch noexit resource - desc: "patching something that doesn't exist is a 404" - PATCH: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - -# GET single resource failure modes - - - name: get noexist resource - desc: if a resource does not exist 404 - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 404 - response_strings: - - The resource could not be found. - - - name: get bad resource id - desc: https://bugs.launchpad.net/gnocchi/+bug/1425588 - GET: /v1/resource/generic/noexist - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 404 - response_strings: - - The resource could not be found. 
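Both of the failure tests above resolve to a plain 404, so a client does not need to pre-validate that an id is a well-formed UUID before fetching; a short sketch under the same assumed endpoint:

    import requests

    resp = requests.get(
        "http://localhost:8041/v1/resource/generic/"
        "77777777-cc60-4033-804e-2d3098c7d2e9",
        headers={
            "x-user-id": "0fbb231484614b1a80131fc22f6afc9c",
            "x-project-id": "f3d41b770cc14f0bb94a1d5be9c0e3ea",
        },
    )
    if resp.status_code == 404:
        # Unknown and malformed resource ids look identical to clients.
        print("resource not found:", resp.text)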
- - - name: get metrics for this not-existing resource - GET: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - -# List resources - - - name: list generic resources no auth - GET: /v1/resource/generic - response_strings: - - "[]" - - - name: list generic resources - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $[0].user_id: 0fbb231484614b1a80131fc22f6afc9c - $[-1].user_id: foobar - - - name: list all resources - GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_strings: - - '"type": "generic"' - -# Metric handling when POSTing resources. - - - name: post new generic with non-existent metrics - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 85C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: 10 - status: 400 - - - name: post new generic with metrics bad policy - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 85C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: noexist - status: 400 - - - name: post new generic with metrics no policy rule - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 85BABE39-F7F7-455A-877B-62C22E11AA40 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: {} - status: 400 - response_strings: - - No archive policy name specified and no archive policy rule found matching the metric name cpu.util - - - name: post new generic with metrics using policy rule - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 85BABE39-F7F7-455A-877B-62C22E11AA40 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - disk.io.rate: {} - status: 201 - - - name: post new generic with metrics - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: d13982cb-4cce-4f84-a96e-7581be1e599c - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - disk.util: - archive_policy_name: medium - status: 201 - response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - - name: post new generic with metrics and un-normalized user/project id from keystone middleware - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 
85C44741-CC60-4033-804E-2D3098C7D2E9 - metrics: - cpu.util: - archive_policy_name: medium - status: 201 - response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - - - name: get metrics for this resource - desc: with async measure handling this is a null test - GET: /v1/resource/generic/$RESPONSE['$.id']/metric/cpu.util/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - response_strings: - - "[]" - -# Interrogate the NamedMetricController - - - name: list the generics - GET: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - - - name: request metrics from one of the generics - GET: /v1/resource/generic/$RESPONSE['$[-1].id']/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.`len`: 1 - $[0].name: cpu.util - $[0].resource_id: 85c44741-cc60-4033-804e-2d3098c7d2e9 - - - name: request metrics from non uuid metrics - desc: 404 from GenericResourceController - GET: /v1/resource/generic/not.a.uuid/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 404 - - - name: request cpuutil metric from generic - GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - $.archive_policy.name: medium - - - name: try post cpuutil metric to generic - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 405 - - - name: request cpuutil measures from generic - desc: with async measure handling this is a null test - GET: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/cpu.util/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_strings: - - "[]" - - - name: post cpuutil measures - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: request cpuutil measures again - GET: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - poll: - count: 50 - delay: .1 - response_json_paths: - $[0][0]: "2015-03-06T14:33:57+00:00" - $[0][1]: 1.0 - $[0][2]: 43.100000000000001 - - - name: post metric at generic - POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 204 - data: - electron.spin: - archive_policy_name: medium - response_headers: - - - name: post metric at generic with empty definition - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: 
application/json - status: 400 - data: - foo.bar: {} - response_strings: - - No archive policy name specified and no archive policy rule found matching the metric name foo.bar - - - name: post metric at generic using archive policy rule - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 204 - data: - disk.io.rate: {} - - - name: duplicate metrics at generic - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 409 - data: - electron.spin: - archive_policy_name: medium - response_strings: - - Named metric electron.spin already exists - - - name: post metrics at generic bad policy - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 400 - data: - electron.charge: - archive_policy_name: high - response_strings: - - Archive policy high does not exist - -# Check bad timestamps - - - name: post new generic with bad timestamp - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: 95C44741-CC60-4033-804E-2D3098C7D2E9 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - ended_at: "2001-12-15T02:59:43" - started_at: "2014-12-15T02:59:43" - status: 400 - response_strings: - - Start timestamp cannot be after end timestamp - -# Post metrics to unknown resource - - - name: post to non uuid metrics - desc: 404 from GenericResourceController - POST: /v1/resource/generic/not.a.uuid/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - some.metric: - archive_policy_name: medium - status: 404 - - - name: post to missing uuid metrics - desc: 404 from NamedMetricController - POST: /v1/resource/generic/d5a5994e-ee90-11e4-88cf-685b35afa334/metric - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - some.metric: - archive_policy_name: medium - status: 404 - -# Post measurements on unknown things - - - name: post measure on unknown metric - desc: 404 from NamedMetricController with metric error - POST: /v1/resource/generic/85C44741-CC60-4033-804E-2D3098C7D2E9/metric/unknown/measures - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - status: 404 - response_strings: - - Metric unknown does not exist - -# DELETE-ing generics - - - name: delete generic - DELETE: /v1/resource/generic/75C44741-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 204 - - - name: delete noexist generic - DELETE: /v1/resource/generic/77777777-CC60-4033-804E-2D3098C7D2E9 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 404 - -# Delete a batch of resources by attributes filter - - - name: create resource one - desc: before 
test batch delete, create some resources using a float in started_at - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-aaaa-4d67-9985-02511241e7d1 - started_at: 1388714522.0 - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: create resource two - desc: before test batch delete, create some resources - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-bbbb-4d67-9985-02511241e7d1 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: create resource three - desc: before test batch delete, create some resources - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-cccc-4d67-9985-02511241e7d1 - started_at: "2014-08-04T00:00:00.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: create resource four - desc: before test batch delete, create some resources - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-dddd-4d67-9985-02511241e7d1 - started_at: "2014-08-04T00:00:00.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: create resource five - desc: before test batch delete, create some resources - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-eeee-4d67-9985-02511241e7d1 - started_at: "2015-08-14T00:00:00.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: create resource six - desc: before test batch delete, create some resources - POST: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - id: f93450f2-ffff-4d67-9985-02511241e7d1 - started_at: "2015-08-14T00:00:00.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: get resource one - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-aaaa-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - - - name: get resource two - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-bbbb-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - - - name: get resource three - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-cccc-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - 
status: 200 - - - name: get resource four - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-dddd-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - - - name: get resource five - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-eeee-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - - - name: get resource six - desc: ensure the resources exists - GET: /v1/resource/generic/f93450f2-ffff-4d67-9985-02511241e7d1 - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - status: 200 - - - name: delete random data structure - desc: delete an empty list test - DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - resource_ids: - [] - attrs: - test - status: 400 - - - name: delete something empty - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: "" - status: 400 - - - name: delete something empty a - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - id: [] - status: 200 - response_json_paths: - $.deleted: 0 - - - name: delete something empty b - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: {} - status: 400 - - - name: delete something empty c - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - and: [] - status: 400 - - - name: delete something empty d - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - and: - - or: [] - - id: - =: "" - status: 400 - - - name: delete something empty e - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - and: [] - status: 400 - - - name: delete something empty f - desc: use empty filter for delete - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - and: - - in: - id: [] - - started_at: "" - status: 400 - - - name: delete batch of resources filter by started_at - desc: delete the created resources - DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - eq: - started_at: "2014-08-04" - status: 200 - 
response_json_paths: - $.deleted: 2 - - - name: delete batch of resources filter by multiple ids - desc: delete the created resources - DELETE: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - id: - - f93450f2-aaaa-4d67-9985-02511241e7d1 - - f93450f2-bbbb-4d67-9985-02511241e7d1 - status: 200 - response_json_paths: - $.deleted: 2 - - - name: delete both existent and non-existent data - desc: delete exits and non-exist data - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - id: - - f93450f2-eeee-4d67-9985-02511241e7d1 - - f93450f2-ffff-4d67-9985-02511241e7d1 - - f93450f2-yyyy-4d67-9985-02511241e7d1 - - f93450f2-xxxx-4d67-9985-02511241e7d1 - status: 200 - response_json_paths: - $.deleted: 2 - - - name: delete multiple non-existent resources - desc: delete a batch of non-existent resources - DELETE: $LAST_URL - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - data: - in: - id: - - f93450f2-zzzz-4d67-9985-02511241e7d1 - - f93450f2-kkkk-4d67-9985-02511241e7d1 - status: 200 - response_json_paths: - $.deleted: 0 diff --git a/gnocchi/tests/functional/gabbits/search-metric.yaml b/gnocchi/tests/functional/gabbits/search-metric.yaml deleted file mode 100644 index 4f477b71c..000000000 --- a/gnocchi/tests/functional/gabbits/search-metric.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# -# Test the search API to achieve coverage of just the -# SearchController and SearchMetricController class code. -# - -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-roles: admin - data: - name: high - definition: - - granularity: 1 second - timespan: 1 hour - - granularity: 2 second - timespan: 1 hour - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/high - status: 201 - - - name: create metric - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: high - status: 201 - - - name: post measures - desc: for later use - POST: /v1/batch/metrics/measures - request_headers: - content-type: application/json - data: - $RESPONSE['$.id']: - - timestamp: "2014-10-06T14:34:12" - value: 12 - - timestamp: "2014-10-06T14:34:14" - value: 12 - - timestamp: "2014-10-06T14:34:16" - value: 12 - - timestamp: "2014-10-06T14:34:18" - value: 12 - - timestamp: "2014-10-06T14:34:20" - value: 12 - - timestamp: "2014-10-06T14:34:22" - value: 12 - - timestamp: "2014-10-06T14:34:24" - value: 12 - - timestamp: "2014-10-06T14:34:26" - value: 12 - - timestamp: "2014-10-06T14:34:28" - value: 12 - - timestamp: "2014-10-06T14:34:30" - value: 12 - - timestamp: "2014-10-06T14:34:32" - value: 12 - - timestamp: "2014-10-06T14:34:34" - value: 12 - status: 202 - - - name: get metric id - GET: /v1/metric - status: 200 - response_json_paths: - $[0].archive_policy.name: high - - - name: search with one correct granularity - POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s - request_headers: - content-type: application/json - data: - "=": 12 - status: 200 - 
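These search tests POST a small operator document (here {"=": 12}) and select granularities through repeated query parameters. Issued directly, the call looks like the following sketch; the metric id is a placeholder and the endpoint an assumption:

    import requests

    METRIC_ID = "00000000-0000-0000-0000-000000000000"  # placeholder

    resp = requests.post(
        "http://localhost:8041/v1/search/metric",
        # A list value produces repeated parameters:
        # ?metric_id=...&granularity=1s&granularity=2s
        params={"metric_id": METRIC_ID, "granularity": ["1s", "2s"]},
        headers={
            "x-user-id": "0fbb231484614b1a80131fc22f6afc9c",
            "x-project-id": "f3d41b770cc14f0bb94a1d5be9c0e3ea",
        },
        json={"=": 12},  # match measures whose value equals 12
    )
    # 200 with the matching measures; 400 if any requested granularity
    # does not exist for the metric, as the tests below show.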
- - name: search with multiple correct granularities - POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1second&granularity=2s - request_headers: - content-type: application/json - data: - "=": 12 - status: 200 - - - name: search with correct and incorrect granularities - POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=1s&granularity=300 - request_headers: - content-type: application/json - data: - "=": 12 - status: 400 - response_strings: - - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist - - - name: search with incorrect granularity - POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&granularity=300 - request_headers: - content-type: application/json - data: - "=": 12 - status: 400 - response_strings: - - Granularity '300.0' for metric $HISTORY['get metric id'].$RESPONSE['$[0].id'] does not exist - - - name: search measure with wrong start - POST: /v1/search/metric?metric_id=$HISTORY['get metric id'].$RESPONSE['$[0].id']&start=foobar - request_headers: - content-type: application/json - data: - ∧: - - ≥: 1000 - status: 400 - response_strings: - - Invalid value for start - - - name: create metric 2 - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: "high" - status: 201 - - - name: search measure with wrong stop - POST: /v1/search/metric?metric_id=$RESPONSE['$.id']&stop=foobar - request_headers: - content-type: application/json - data: - ∧: - - ≥: 1000 - status: 400 - response_strings: - - Invalid value for stop diff --git a/gnocchi/tests/functional/gabbits/search.yaml b/gnocchi/tests/functional/gabbits/search.yaml deleted file mode 100644 index c8f9bc2db..000000000 --- a/gnocchi/tests/functional/gabbits/search.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# -# Test the search API to achieve coverage of just the -# SearchController and SearchResourceController class code. 
-# - -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - -tests: - - name: typo of search - GET: /v1/search/notexists - status: 404 - - - name: typo of search in resource - GET: /v1/search/resource/foobar - status: 404 - - - name: search with invalid uuid - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - =: - id: "cd9eef" - - - name: post generic resource - POST: /v1/resource/generic - request_headers: - content-type: application/json - data: - id: faef212f-0bf4-4030-a461-2186fef79be0 - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: post generic resource twice - POST: /v1/resource/generic - request_headers: - content-type: application/json - data: - id: df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e - started_at: "2014-01-03T02:02:02.000000" - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 201 - - - name: search in_ - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - in: - id: - - faef212f-0bf4-4030-a461-2186fef79be0 - - df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e - response_json_paths: - $.`len`: 2 - - - name: search like created_by_project_id - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - eq: - created_by_project_id: - - f3d41b770cc14f0bb94a1d5be9c0e3ea - response_json_paths: - $.`len`: 0 - - - name: search in_ query string - POST: /v1/search/resource/generic?filter=id%20in%20%5Bfaef212f-0bf4-4030-a461-2186fef79be0%2C%20df7e5e75-6a1d-4ff7-85cb-38eb9d75da7e%5D - request_headers: - content-type: application/json - response_json_paths: - $.`len`: 2 - - - name: search empty query - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: {} - response_json_paths: - $.`len`: 2 diff --git a/gnocchi/tests/functional/gabbits/transformedids.yaml b/gnocchi/tests/functional/gabbits/transformedids.yaml deleted file mode 100644 index cc544f119..000000000 --- a/gnocchi/tests/functional/gabbits/transformedids.yaml +++ /dev/null @@ -1,184 +0,0 @@ -# -# Test the resource API to achieve coverage of just the -# ResourcesController and ResourceController class code. -# - -fixtures: - - ConfigFixture - -defaults: - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9c - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - content-type: application/json - -tests: - -# We will need an archive for use in later tests so we create it -# here. This could be done in a fixture but since the API allows it -# may as well use it. 
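As the comment notes, the policy could come from a fixture but is created through the API instead. For reference, the equivalent admin call outside gabbi is one POST; a minimal sketch, with the endpoint and the noauth-style role header as assumptions:

    import requests

    resp = requests.post(
        "http://localhost:8041/v1/archive_policy",
        headers={"x-roles": "admin"},
        json={
            "name": "medium",
            "definition": [{"granularity": "1 second"}],
        },
    )
    assert resp.status_code == 201  # created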
- - - name: create archive policy - desc: for later use - POST: /v1/archive_policy - request_headers: - x-roles: admin - data: - name: medium - definition: - - granularity: 1 second - status: 201 -# Check transformed uuids across the URL hierarchy - - - name: post new resource non uuid for duplication test - POST: /v1/resource/generic - data: - id: generic zero - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - status: 201 - response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_headers: - # is a UUID - location: /v1/resource/generic/[a-f0-9-]{36}/ - - - name: post new resource non uuid duplication - POST: /v1/resource/generic - data: - id: generic zero - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - status: 409 - - - name: post new resource with invalid uuid - POST: /v1/resource/generic - data: - id: 'id-with-/' - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - status: 400 - response_strings: - - "'/' is not supported in resource id" - - - - name: post new resource non uuid again different user - POST: /v1/resource/generic - request_headers: - x-user-id: 0fbb231484614b1a80131fc22f6afc9b - x-project-id: f3d41b770cc14f0bb94a1d5be9c0e3ea - data: - id: generic zero - metrics: - cpu.util: - archive_policy_name: medium - status: 201 - response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9b - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_headers: - # is a UUID - location: /v1/resource/generic/[a-f0-9-]{36}/ - - - name: post new resource non uuid - POST: /v1/resource/generic - data: - id: generic one - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - status: 201 - response_json_paths: - created_by_user_id: 0fbb231484614b1a80131fc22f6afc9c - created_by_project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - response_headers: - # is a UUID - location: /v1/resource/generic/[a-f0-9-]{36}/ - - - name: get new non uuid resource by external id - GET: /v1/resource/generic/generic%20one - response_json_paths: - $.id: $RESPONSE['$.id'] - - - name: get new non uuid resource by internal id - GET: /v1/resource/generic/$RESPONSE['$.id'] - response_json_paths: - $.id: $RESPONSE['$.id'] - - - name: patch by external id - PATCH: /v1/resource/generic/generic%20one - data: - metrics: - cattle: - archive_policy_name: medium - status: 200 - response_strings: - - '"cattle"' - - - name: list metric by external resource id - GET: /v1/resource/generic/generic%20one/metric - response_json_paths: - $[0].name: cattle - - - name: list empty measures by external resource id - GET: /v1/resource/generic/generic%20one/metric/cattle/measures - response_json_paths: - $: [] - - - name: post measures by external resource id - POST: /v1/resource/generic/generic%20one/metric/cattle/measures - data: - - timestamp: "2015-03-06T14:33:57" - value: 43.1 - - timestamp: "2015-03-06T14:34:12" - value: 12 - status: 202 - - - name: list two measures by external resource id - GET: $LAST_URL - poll: - count: 10 - delay: 1 - response_json_paths: - $[0][2]: 43.1 - $[1][2]: 12 - - - name: delete the resource by external id - DELETE: /v1/resource/generic/generic%20one - status: 204 - -# Check length 
- -# Check length handling - - - name: fail to post too long non uuid resource id - POST: /v1/resource/generic - data: - id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - status: 400 - response_strings: - - transformable resource id >255 max allowed characters for dictionary value - - - name: post long non uuid resource id - POST: $LAST_URL - data: - # 255 char string - id: four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue that counts four score and seven years ago we the people of the United States of America i have a dream it is the courage to continue - user_id: 0fbb231484614b1a80131fc22f6afc9c - project_id: f3d41b770cc14f0bb94a1d5be9c0e3ea - metrics: - cpu.util: - archive_policy_name: medium - status: 201 diff --git a/gnocchi/tests/functional/test_gabbi.py b/gnocchi/tests/functional/test_gabbi.py deleted file mode 100644 index 489bd5461..000000000 --- a/gnocchi/tests/functional/test_gabbi.py +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver -import wsgi_intercept - -from gnocchi.tests.functional import fixtures - - -wsgi_intercept.STRICT_RESPONSE_HEADERS = True -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, - intercept=fixtures.setup_app, - fixture_module=fixtures) diff --git a/gnocchi/tests/functional/test_gabbi_prefix.py b/gnocchi/tests/functional/test_gabbi_prefix.py deleted file mode 100644 index 0a77ceeb9..000000000 --- a/gnocchi/tests/functional/test_gabbi_prefix.py +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver - -from gnocchi.tests.functional import fixtures - - -TESTS_DIR = 'gabbits' -PREFIX = '/gnocchi' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host=None, prefix=PREFIX, - intercept=fixtures.setup_app, - fixture_module=fixtures) diff --git a/gnocchi/tests/functional_live/__init__.py b/gnocchi/tests/functional_live/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tests/functional_live/gabbits/live.yaml b/gnocchi/tests/functional_live/gabbits/live.yaml deleted file mode 100644 index d63cb0966..000000000 --- a/gnocchi/tests/functional_live/gabbits/live.yaml +++ /dev/null @@ -1,739 +0,0 @@ -# -# Confirmation tests to run against a live web server. -# -# These act as a very basic sanity check. - -defaults: - request_headers: - x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] - authorization: $ENVIRON['GNOCCHI_AUTHORIZATION'] - -tests: - - name: check / - GET: / - - # Fail to create archive policy - - name: wrong archive policy content type - desc: attempt to create archive policy with invalid content-type - POST: /v1/archive_policy - request_headers: - content-type: text/plain - status: 415 - response_strings: - - Unsupported Media Type - - - name: wrong method - desc: attempt to create archive policy with 'PUT' method - PUT: /v1/archive_policy - request_headers: - content-type: application/json - status: 405 - - - name: invalid authZ - desc: x-auth-token is invalid - POST: /v1/archive_policy - request_headers: - content-type: application/json - x-auth-token: 'hello' - authorization: 'basic hello:' - data: - name: medium - definition: - - granularity: 1 second - status: 401 - - - name: bad archive policy body - desc: archive policy contains invalid key 'cowsay' - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - cowsay: moo - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - - - name: missing definition - desc: archive policy is missing 'definition' keyword - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: medium - status: 400 - response_strings: - - "Invalid input: required key not provided" - - - name: empty definition - desc: empty definition for archive policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: medium - definition: [] - status: 400 - response_strings: - - "Invalid input: length of value must be at least 1" - - - name: wrong value definition - desc: invalid type of 'definition' key - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: somename - definition: foobar - status: 400 - response_strings: - - "Invalid input: expected a list" - - - name: useless definition - desc: invalid archive policy definition - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: medium - definition: - - cowsay: moo - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - - # - # Create archive policy - # - - - name: create archive policy - desc: create archve policy 'gabbilive' for live tests - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: gabbilive - back_window: 0 - definition: - - granularity: 1 
- - # - # Create archive policy - # - - - name: create archive policy - desc: create archive policy 'gabbilive' for live tests - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: gabbilive - back_window: 0 - definition: - - granularity: 1 second - points: 60 - - granularity: 2 second - timespan: 1 minute - - points: 5 - timespan: 5 minute - aggregation_methods: - - mean - - min - - max - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/gabbilive - status: 201 - - # Retrieve it correctly and then poorly - - - name: get archive policy - desc: retrieve archive policy 'gabbilive' and assert its values - GET: $LOCATION - response_headers: - content-type: /application/json/ - response_json_paths: - $.name: gabbilive - $.back_window: 0 - $.definition[0].granularity: "0:00:01" - $.definition[0].points: 60 - $.definition[0].timespan: "0:01:00" - $.definition[1].granularity: "0:00:02" - $.definition[1].points: 30 - $.definition[1].timespan: "0:01:00" - $.definition[2].granularity: "0:01:00" - $.definition[2].points: 5 - $.definition[2].timespan: "0:05:00" - $.aggregation_methods.`sorted`: ["max", "mean", "min"] - - - name: get wrong accept - desc: invalid 'accept' header - GET: /v1/archive_policy/medium - request_headers: - accept: text/plain - status: 406 - - # Unexpected methods - - - name: post single archive - desc: unexpected 'POST' request to archive policy - POST: /v1/archive_policy/gabbilive - status: 405 - - - name: put single archive - desc: unexpected 'PUT' request to archive policy - PUT: /v1/archive_policy/gabbilive - status: 405 - - # Duplicate archive policy names are not allowed - - - name: create duplicate archive policy - desc: attempt to create archive policy 'gabbilive' again - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: gabbilive - definition: - - granularity: 30 second - points: 60 - status: 409 - response_strings: - - Archive policy gabbilive already exists - - # Create a unicode named policy - - - name: post unicode policy name - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: ✔éñ☃ - definition: - - granularity: 1 minute - points: 20 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - response_json_paths: - name: ✔éñ☃ - - - name: retrieve unicode policy name - GET: $LOCATION - response_json_paths: - name: ✔éñ☃ - - - name: delete unicode archive policy - DELETE: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - status: 204 - - # It really is gone - - - name: confirm delete - desc: assert deleted unicode policy is not available - GET: /v1/archive_policy/%E2%9C%94%C3%A9%C3%B1%E2%98%83 - status: 404 - - # Fail to delete one that does not exist - - - name: delete missing archive - desc: delete non-existent archive policy - DELETE: /v1/archive_policy/grandiose - status: 404 - response_strings: - - Archive policy grandiose does not exist - - # Attempt to create illogical policies - - - name: create illogical policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: complex - definition: - - granularity: 1 second - points: 60 - timespan: "0:01:01" - status: 400 - response_strings: - - timespan ≠ granularity × points - - - name: create identical granularities policy - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: complex - definition: - - granularity: 1 second - points: 60 - - granularity: 1 second - points: 120 - status: 400 - response_strings: - - "More than one archive policy uses granularity `1.0'"
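The 'create illogical policy' test above trips the rule quoted in its response: a definition's timespan must equal granularity × points, so 1 s × 60 points = 60 s cannot coexist with a declared timespan of "0:01:01" (61 s). The same arithmetic fills in whichever field a definition omits, as the 'get archive policy' assertions for gabbilive show. A one-line check of both cases:

    def timespan_ok(granularity, points, timespan):
        # the invariant behind "timespan ≠ granularity × points"
        return granularity * points == timespan

    assert not timespan_ok(1, 60, 61)  # the "complex" policy above, hence 400
    assert timespan_ok(2, 30, 60)      # gabbilive's 2 s definition (1 minute)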
- - - name: policy invalid unit - desc: invalid unit for archive policy 'timespan' key - POST: /v1/archive_policy - request_headers: - content-type: application/json - data: - name: 227d0e1f-4295-4e4b-8515-c296c47d71d3 - definition: - - granularity: 1 second - timespan: "1 shenanigan" - status: 400 - - # - # Archive policy rules - # - - - name: create archive policy rule1 - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - data: - name: gabbilive_rule - metric_pattern: "live.*" - archive_policy_name: gabbilive - status: 201 - response_json_paths: - $.metric_pattern: "live.*" - $.archive_policy_name: gabbilive - $.name: gabbilive_rule - - - name: create invalid archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - data: - name: test_rule - metric_pattern: "disk.foo.*" - status: 400 - - - name: missing auth archive policy rule - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - x-auth-token: 'hello' - authorization: 'basic hello:' - data: - name: test_rule - metric_pattern: "disk.foo.*" - archive_policy_name: low - status: 401 - - - name: wrong archive policy rule content type - POST: /v1/archive_policy_rule - request_headers: - content-type: text/plain - status: 415 - response_strings: - - Unsupported Media Type - - - name: bad archive policy rule body - POST: /v1/archive_policy_rule - request_headers: - content-type: application/json - data: - whaa: foobar - status: 400 - response_strings: - - "Invalid input: extra keys not allowed" - - # Get archive policy rules - - - name: get all archive policy rules - GET: /v1/archive_policy_rule - status: 200 - response_json_paths: - $[\name][0].name: "gabbilive_rule" - $[\name][0].metric_pattern: "live.*" - $[\name][0].archive_policy_name: "gabbilive" - - - name: get unknown archive policy rule - GET: /v1/archive_policy_rule/foo - status: 404 - - - - name: get archive policy rule - GET: /v1/archive_policy_rule/gabbilive_rule - status: 200 - response_json_paths: - $.metric_pattern: "live.*" - $.archive_policy_name: "gabbilive" - $.name: "gabbilive_rule" - - - name: delete archive policy in use - desc: fails due to https://bugs.launchpad.net/gnocchi/+bug/1569781 - DELETE: /v1/archive_policy/gabbilive - status: 400 - - # - # Metrics - # - - - - name: get all metrics - GET: /v1/metric - status: 200 - - - name: create metric with name and rule - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "live.io.rate" - status: 201 - response_json_paths: - $.archive_policy_name: gabbilive - $.name: live.io.rate
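The metric above is created without naming a policy and still comes back with archive_policy_name gabbilive, because its name matches the gabbilive_rule metric_pattern created earlier. The pattern looks like shell-style globbing; assuming fnmatch semantics (an assumption for illustration):

    import fnmatch

    # "live.io.rate" matches "live.*", so the rule's policy applies
    assert fnmatch.fnmatch("live.io.rate", "live.*")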
- - - name: assert metric is present in listing - GET: /v1/metric?id=$HISTORY['create metric with name and rule'].$RESPONSE['$.id'] - response_json_paths: - $.`len`: 1 - - - name: assert metric is the only one with this policy - GET: /v1/metric?archive_policy_name=gabbilive - response_json_paths: - $.`len`: 1 - - - name: delete metric - DELETE: /v1/metric/$HISTORY['create metric with name and rule'].$RESPONSE['$.id'] - status: 204 - - - name: assert metric is expunged - GET: $HISTORY['assert metric is present in listing'].$URL&status=delete - poll: - count: 360 - delay: 1 - response_json_paths: - $.`len`: 0 - - - name: create metric with name and policy - POST: /v1/metric - request_headers: - content-type: application/json - data: - name: "aagabbi.live.metric" - archive_policy_name: "gabbilive" - status: 201 - response_json_paths: - $.archive_policy_name: gabbilive - $.name: "aagabbi.live.metric" - - - name: get valid metric id - GET: $LOCATION - status: 200 - response_json_paths: - $.archive_policy.name: gabbilive - - - name: delete the metric - DELETE: /v1/metric/$RESPONSE['$.id'] - status: 204 - - - name: ensure the metric is deleted - GET: /v1/metric/$HISTORY['get valid metric id'].$RESPONSE['$.id'] - status: 404 - - - name: create metric bad archive policy - POST: /v1/metric - request_headers: - content-type: application/json - data: - archive_policy_name: 2e2675aa-105e-4664-a30d-c407e6a0ea7f - status: 400 - response_strings: - - Archive policy 2e2675aa-105e-4664-a30d-c407e6a0ea7f does not exist - - - name: create metric bad content-type - POST: /v1/metric - request_headers: - content-type: plain/text - data: '{"archive_policy_name": "cookies"}' - status: 415 - - - # - # Cleanup - # - - - name: delete archive policy rule - DELETE: /v1/archive_policy_rule/gabbilive_rule - status: 204 - - - name: confirm delete archive policy rule - DELETE: /v1/archive_policy_rule/gabbilive_rule - status: 404 - - - # - # Resources section - # - - - name: root of resource - GET: /v1/resource - response_json_paths: - $.generic: $SCHEME://$NETLOC/v1/resource/generic - - - name: typo of resource - GET: /v1/resoue - status: 404 - - - name: typo of resource extra - GET: /v1/resource/foobar - status: 404 - - - name: generic resource - GET: /v1/resource/generic - status: 200 - - - name: post resource type - POST: /v1/resource_type - request_headers: - content-type: application/json - data: - name: myresource - attributes: - display_name: - type: string - required: true - max_length: 5 - min_length: 2 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/v1/resource_type/myresource - - - name: add an attribute - PATCH: /v1/resource_type/myresource - request_headers: - content-type: application/json-patch+json - data: - - op: "add" - path: "/attributes/awesome-stuff" - value: {"type": "bool", "required": false} - status: 200 - response_json_paths: - $.name: myresource - $.attributes."awesome-stuff".type: bool - $.attributes.[*].`len`: 2 - - - name: remove an attribute - PATCH: /v1/resource_type/myresource - request_headers: - content-type: application/json-patch+json - data: - - op: "remove" - path: "/attributes/awesome-stuff" - status: 200 - response_json_paths: - $.name: myresource - $.attributes.display_name.type: string - $.attributes.[*].`len`: 1
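The two attribute tests above send RFC 6902 JSON Patch documents, as the application/json-patch+json content type indicates. A small client-side sketch of the same 'add' operation using the third-party jsonpatch package (illustrative only; the server applies the patch itself):

    import jsonpatch

    attrs = {"attributes": {"display_name": {"type": "string"}}}
    patch = jsonpatch.JsonPatch([
        {"op": "add", "path": "/attributes/awesome-stuff",
         "value": {"type": "bool", "required": False}},
    ])
    print(patch.apply(attrs))  # now carries both attributes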
- - - name: myresource resource bad accept - desc: Expect 406 on bad accept type - request_headers: - accept: text/plain - GET: /v1/resource/myresource - status: 406 - response_strings: - - 406 Not Acceptable - - - name: myresource resource complex accept - desc: fall back to an acceptable media type - request_headers: - accept: text/plain, application/json; q=0.8 - GET: /v1/resource/myresource - status: 200 - - - name: post myresource resource - POST: /v1/resource/myresource - request_headers: - content-type: application/json - data: - id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e - user_id: 126204ef-989a-46fd-999b-ee45c8108f31 - project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - display_name: myvm - metrics: - vcpus: - archive_policy_name: gabbilive - status: 201 - response_json_paths: - $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e - $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 - $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - $.display_name: "myvm" - - - name: get myresource resource - GET: $LOCATION - status: 200 - response_json_paths: - $.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e - $.user_id: 126204ef-989a-46fd-999b-ee45c8108f31 - $.project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - $.display_name: "myvm" - - - name: get vcpus metric - GET: /v1/metric/$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus'] - status: 200 - response_json_paths: - $.name: vcpus - $.resource.id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e - - - name: search for myresource resource via user_id - POST: /v1/search/resource/myresource - request_headers: - content-type: application/json - data: - =: - user_id: "126204ef-989a-46fd-999b-ee45c8108f31" - response_json_paths: - $..id: 2ae35573-7f9f-4bb1-aae8-dad8dff5706e - $..user_id: 126204ef-989a-46fd-999b-ee45c8108f31 - $..project_id: 98e785d7-9487-4159-8ab8-8230ec37537a - $..display_name: myvm - - - name: search for myresource resource via id and 'generic' type - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - =: - id: "2ae35573-7f9f-4bb1-aae8-dad8dff5706e" - response_strings: - - '"user_id": "126204ef-989a-46fd-999b-ee45c8108f31"' - - - name: search for myresource resource via user_id and project_id - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - and: - - =: - user_id: "126204ef-989a-46fd-999b-ee45c8108f31" - - =: - project_id: "98e785d7-9487-4159-8ab8-8230ec37537a" - response_strings: - - '"id": "2ae35573-7f9f-4bb1-aae8-dad8dff5706e"' - - - name: patch myresource resource - PATCH: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - request_headers: - content-type: application/json - data: - display_name: myvm2 - status: 200 - response_json_paths: - display_name: myvm2 - - - name: post some measures to the metric on myresource - POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:33:57" - value: 2 - - timestamp: "2015-03-06T14:34:12" - value: 2 - status: 202 - - - name: get myresource measures with poll - GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures - # wait up to 60 seconds for the measures to be processed - poll: - count: 60 - delay: 1 - response_json_paths: - $[0][2]: 2 - $[1][2]: 2 - - - name: post some more measures to the metric on myresource - POST: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures - request_headers: - content-type: application/json - data: - - timestamp: "2015-03-06T14:34:15" - value: 5 - - timestamp: "2015-03-06T14:34:20" - value: 5 - status: 202 - - - name: get myresource measures with refresh - GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e/metric/vcpus/measures?refresh=true - response_json_paths: - $[0][2]: 2 - $[1][2]: 4 - $[2][2]: 2 - $[3][2]: 2 - $[4][2]: 5 - $[5][2]: 5
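The expected rows in the refresh test above can be reproduced by hand: at the coarsest 60 s granularity, minute 14:33 holds one sample (2) and minute 14:34 holds three (2, 5, 5), so the first two aggregated values are 2 and 4; the remaining rows are the same samples at a finer granularity. For instance:

    from statistics import mean

    assert mean([2]) == 2        # 14:33 bucket at 60 s granularity
    assert mean([2, 5, 5]) == 4  # 14:34 bucket at 60 s granularity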
- - # - # Search for resources - # - - - name: typo of search - POST: /v1/search/notexists - status: 404 - - - name: typo of search in resource - POST: /v1/search/resource/foobar - status: 404 - - - name: search with invalid uuid - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - =: - id: "cd9eef" - status: 200 - response_json_paths: - $.`len`: 0 - - - name: assert vcpus metric exists in listing - GET: /v1/metric?id=$HISTORY['get myresource resource'].$RESPONSE['$.metrics.vcpus'] - poll: - count: 360 - delay: 1 - response_json_paths: - $.`len`: 1 - - - name: delete myresource resource - DELETE: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - status: 204 - - # assert resource is really deleted - - name: assert myresource resource is deleted - GET: /v1/resource/myresource/2ae35573-7f9f-4bb1-aae8-dad8dff5706e - status: 404 - - - name: assert vcpus metric is really expunged - GET: $HISTORY['assert vcpus metric exists in listing'].$URL&status=delete - poll: - count: 360 - delay: 1 - response_json_paths: - $.`len`: 0 - - - name: post myresource resource no data - POST: /v1/resource/myresource - request_headers: - content-type: application/json - status: 400 - - - name: assert no metrics have the gabbilive policy - GET: $HISTORY['assert metric is the only one with this policy'].$URL - response_json_paths: - $.`len`: 0 - - - name: assert no deleted metrics have the gabbilive policy - GET: $HISTORY['assert metric is the only one with this policy'].$URL&status=delete - response_json_paths: - $.`len`: 0 - - - name: delete single archive policy cleanup - DELETE: /v1/archive_policy/gabbilive - poll: - count: 360 - delay: 1 - status: 204 - - # It really is gone - - - name: delete our resource type - DELETE: /v1/resource_type/myresource - status: 204 - - - name: confirm delete of cleanup - GET: /v1/archive_policy/gabbilive - status: 404 diff --git a/gnocchi/tests/functional_live/gabbits/search-resource.yaml b/gnocchi/tests/functional_live/gabbits/search-resource.yaml deleted file mode 100644 index fe2547885..000000000 --- a/gnocchi/tests/functional_live/gabbits/search-resource.yaml +++ /dev/null @@ -1,275 +0,0 @@ -# -# Tests to confirm resources are searchable. Run against a live setup. -# URL: http://gnocchi.xyz/rest.html#searching-for-resources -# -# Instance-ResourceID-1: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 -# Instance-ResourceID-2: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a -# Instance-ResourceID-3: c442a47c-eb33-46ce-9665-f3aa0bef54e7 -# -# UserID-1: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 -# UserID-2: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe -# -# ProjectID-1: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 -# ProjectID-2: 40eba01c-b348-49b8-803f-67123251a00a -# -# ImageID-1: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d -# ImageID-2: b01f2588-89dc-46b2-897b-fffae1e10975 -# - -defaults: - request_headers: - x-auth-token: $ENVIRON['GNOCCHI_SERVICE_TOKEN'] - authorization: $ENVIRON['GNOCCHI_AUTHORIZATION'] - -tests: - # - # Set up resource types if they don't exist - # - - - name: create new resource type 'instance-like' - POST: /v1/resource_type - status: 201 - request_headers: - content-type: application/json - data: - name: instance-like - attributes: - display_name: - type: string - required: True - flavor_id: - type: string - required: True - host: - type: string - required: True - image_ref: - type: string - required: False - server_group: - type: string - required: False - - - name: create new resource type 'image-like' - POST: /v1/resource_type - status: 201 - request_headers: - content-type: application/json - data: - name: image-like - attributes: - name: - type: string - required: True - disk_format: - type: string - required: True - container_format: - type: string - required: True - - # - # Set up test resources - # - - name: helper. create instance-like resource-1 - POST: /v1/resource/instance-like - request_headers: - content-type: application/json - data: - display_name: vm-gabbi-1 - id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 - user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 - flavor_id: "1" - image_ref: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d - host: compute-0-gabbi.localdomain - project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 - status: 201
- - - name: helper. create instance-like resource-2 - POST: /v1/resource/instance-like - request_headers: - content-type: application/json - data: - display_name: vm-gabbi-2 - id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a - user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 - flavor_id: "2" - image_ref: b01f2588-89dc-46b2-897b-fffae1e10975 - host: compute-1-gabbi.localdomain - project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 - status: 201 - - - name: helper. create instance-like resource-3 - POST: /v1/resource/instance-like - request_headers: - content-type: application/json - data: - display_name: vm-gabbi-3 - id: c442a47c-eb33-46ce-9665-f3aa0bef54e7 - user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe - flavor_id: "2" - image_ref: b01f2588-89dc-46b2-897b-fffae1e10975 - host: compute-1-gabbi.localdomain - project_id: 40eba01c-b348-49b8-803f-67123251a00a - status: 201 - - - name: helper. create image-like resource-1 - POST: /v1/resource/image-like - request_headers: - content-type: application/json - data: - id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d - container_format: bare - disk_format: qcow2 - name: gabbi-image-1 - user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe - project_id: 40eba01c-b348-49b8-803f-67123251a00a - status: 201 - - # - # Actual tests - # - - - name: search for all resources with a specific user_id - desc: search through all resource types - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - =: - user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe - status: 200 - response_json_paths: - $.`len`: 2 - $.[0].type: instance-like - $.[1].type: image-like - $.[0].id: c442a47c-eb33-46ce-9665-f3aa0bef54e7 - $.[1].id: 7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d - - - name: search for all resources of instance-like type created by a specific user_id - desc: all instances created by a specified user - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - and: - - =: - type: instance-like - - =: - user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 - status: 200 - response_strings: - - '"id": "a64ca14f-bc7c-45b0-aa85-42cd2179e1e2"' - - '"id": "7ccccfa0-92ce-4225-80ca-3ac9cb122d6a"' - response_json_paths: - $.`len`: 2 - $.[0].id: a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 - $.[1].id: 7ccccfa0-92ce-4225-80ca-3ac9cb122d6a - $.[0].type: instance-like - $.[1].type: instance-like - $.[0].metrics.`len`: 0 - $.[1].metrics.`len`: 0 - - - name: search for all resources with a specific project_id - desc: search for all resources in a specific project - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - =: - project_id: c9a5f184-c0d0-4daa-83c3-af6fdc0879e6 - status: 200 - response_json_paths: - $.`len`: 2 - - - name: search for instances on a specific compute using "like" keyword - desc: search for vms hosted on a specific compute node - POST: /v1/search/resource/instance-like - request_headers: - content-type: application/json - data: - like: - host: 'compute-1-gabbi%' - response_json_paths: - $.`len`: 2 - response_strings: - - '"project_id": "40eba01c-b348-49b8-803f-67123251a00a"' - - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"' - - '"user_id": "33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07"' - - '"user_id": "81d82ef3-4deb-499d-9270-9aeb5a3ec5fe"' - - '"display_name": "vm-gabbi-2"' - - '"display_name": "vm-gabbi-3"' - - - name: search for instances using complex search with "like" keyword and user_id - desc: search for vms of specified user hosted on a specific compute node - POST:
/v1/search/resource/instance-like - request_headers: - content-type: application/json - data: - and: - - like: - host: 'compute-%-gabbi%' - - =: - user_id: 33ba83ca-2f12-4ad6-8fa2-bc8b55d36e07 - response_json_paths: - $.`len`: 2 - response_strings: - - '"display_name": "vm-gabbi-1"' - - '"display_name": "vm-gabbi-2"' - - '"project_id": "c9a5f184-c0d0-4daa-83c3-af6fdc0879e6"' - - - name: search for resources of instance-like or image-like type with specific user_id - desc: search for all image-like or instance-like resources created by a specific user - POST: /v1/search/resource/generic - request_headers: - content-type: application/json - data: - and: - - =: - user_id: 81d82ef3-4deb-499d-9270-9aeb5a3ec5fe - - - or: - - =: - type: instance-like - - - =: - type: image-like - status: 200 - response_json_paths: - $.`len`: 2 - response_strings: - - '"type": "image-like"' - - '"type": "instance-like"' - - '"id": "7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d"' - - '"id": "c442a47c-eb33-46ce-9665-f3aa0bef54e7"' - - # - # Tear down resources - # - - - name: helper. delete instance-like resource-1 - DELETE: /v1/resource/instance-like/a64ca14f-bc7c-45b0-aa85-42cd2179e1e2 - status: 204 - - - name: helper. delete instance-like resource-2 - DELETE: /v1/resource/instance-like/7ccccfa0-92ce-4225-80ca-3ac9cb122d6a - status: 204 - - - name: helper. delete instance-like resource-3 - DELETE: /v1/resource/instance-like/c442a47c-eb33-46ce-9665-f3aa0bef54e7 - status: 204 - - - name: helper. delete image-like resource - DELETE: /v1/resource/image-like/7ab2f7ae-7af5-4469-bdc8-3c0f6dfab75d - status: 204 - - - name: helper. delete resource-type instance-like - DELETE: /v1/resource_type/instance-like - status: 204 - - - name: helper. delete resource-type image-like - DELETE: /v1/resource_type/image-like - status: 204 - diff --git a/gnocchi/tests/functional_live/test_gabbi_live.py b/gnocchi/tests/functional_live/test_gabbi_live.py deleted file mode 100644 index aeed07a88..000000000 --- a/gnocchi/tests/functional_live/test_gabbi_live.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver -import six.moves.urllib.parse as urlparse - - -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - gnocchi_url = os.getenv('GNOCCHI_ENDPOINT') - if gnocchi_url: - parsed_url = urlparse.urlsplit(gnocchi_url) - prefix = parsed_url.path.rstrip('/') # turn it into a prefix - - # NOTE(chdent): gabbi requires a port be passed or it will - # default to 8001, so we must dance a little dance to get - # the right ports. Probably gabbi needs to change. 
- # https://github.com/cdent/gabbi/issues/50 - port = 443 if parsed_url.scheme == 'https' else 80 - if parsed_url.port: - port = parsed_url.port - - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, - host=parsed_url.hostname, - port=port, - prefix=prefix) - elif os.getenv("GABBI_LIVE"): - raise RuntimeError('"GNOCCHI_ENDPOINT" is not set') diff --git a/gnocchi/tests/indexer/__init__.py b/gnocchi/tests/indexer/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tests/indexer/sqlalchemy/__init__.py b/gnocchi/tests/indexer/sqlalchemy/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py b/gnocchi/tests/indexer/sqlalchemy/test_migrations.py deleted file mode 100644 index 781236fd4..000000000 --- a/gnocchi/tests/indexer/sqlalchemy/test_migrations.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2015 eNovance -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -import fixtures -import mock -import oslo_db.exception -from oslo_db.sqlalchemy import test_migrations -import six -import sqlalchemy as sa -import sqlalchemy_utils - -from gnocchi import indexer -from gnocchi.indexer import sqlalchemy -from gnocchi.indexer import sqlalchemy_base -from gnocchi.tests import base - - -class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta): - pass - - -class ModelsMigrationsSync( - six.with_metaclass(ABCSkip, - base.TestCase, - test_migrations.ModelsMigrationsSync)): - - def _set_timeout(self): - self.useFixture(fixtures.Timeout(120, gentle=True)) - - def setUp(self): - super(ModelsMigrationsSync, self).setUp() - self.db = mock.Mock() - self.conf.set_override( - 'url', - sqlalchemy.SQLAlchemyIndexer._create_new_database( - self.conf.indexer.url), - 'indexer') - self.index = indexer.get_driver(self.conf) - self.index.connect() - self.index.upgrade(nocreate=True) - self.addCleanup(self._drop_database) - - def _drop_database(self): - try: - sqlalchemy_utils.drop_database(self.conf.indexer.url) - except oslo_db.exception.DBNonExistentDatabase: - # NOTE(sileht): oslo db >= 4.15.0 cleanup this for us - pass - - @staticmethod - def get_metadata(): - return sqlalchemy_base.Base.metadata - - def get_engine(self): - return self.index.get_engine() - - def db_sync(self, engine): - # NOTE(sileht): We ensure all resource type sqlalchemy model are loaded - # in this process - for rt in self.index.list_resource_types(): - if rt.state == "active": - self.index._RESOURCE_TYPE_MANAGER.get_classes(rt) - - def filter_metadata_diff(self, diff): - tables_to_keep = [] - for rt in self.index.list_resource_types(): - if rt.name.startswith("indexer_test"): - tables_to_keep.extend([rt.tablename, - "%s_history" % rt.tablename]) - new_diff = [] - for line in diff: - if len(line) >= 2: - item = line[1] - # NOTE(sileht): skip resource types created for tests - if (isinstance(item, sa.Table) - and item.name in tables_to_keep): - continue - 
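# anything not filtered above is a genuine difference between the sqlalchemy models and the migrated schema, so keep it for ModelsMigrationsSync to report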
new_diff.append(line) - return new_diff diff --git a/gnocchi/tests/test_aggregates.py b/gnocchi/tests/test_aggregates.py deleted file mode 100644 index d5d4e900d..000000000 --- a/gnocchi/tests/test_aggregates.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import uuid - -import pandas -from stevedore import extension - -from gnocchi import aggregates -from gnocchi.aggregates import moving_stats -from gnocchi import storage -from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils -from gnocchi import utils - - -class TestAggregates(tests_base.TestCase): - - def setUp(self): - super(TestAggregates, self).setUp() - mgr = extension.ExtensionManager('gnocchi.aggregates', - invoke_on_load=True) - self.custom_agg = dict((x.name, x.obj) for x in mgr) - - def test_extension_dict(self): - self.assertIsInstance(self.custom_agg['moving-average'], - moving_stats.MovingAverage) - - def test_check_window_valid(self): - for agg_method in self.custom_agg: - window = '60s' - agg_obj = self.custom_agg[agg_method] - result = agg_obj.check_window_valid(window) - self.assertEqual(60.0, result) - - window = '60' - agg_obj = self.custom_agg[agg_method] - result = agg_obj.check_window_valid(window) - self.assertEqual(60.0, result) - - def _test_create_metric_and_data(self, data, spacing): - metric = storage.Metric( - uuid.uuid4(), self.archive_policies['medium']) - start_time = utils.datetime_utc(2014, 1, 1, 12) - incr = datetime.timedelta(seconds=spacing) - measures = [storage.Measure( - utils.dt_in_unix_ns(start_time + incr * n), val) - for n, val in enumerate(data)] - self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium') - self.storage.incoming.add_measures(metric, measures) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.index, metrics, sync=True) - - return metric - - def test_retrieve_data(self): - metric = self._test_create_metric_and_data([69, 42, 6, 44, 7], - spacing=20) - for agg_method in self.custom_agg: - agg_obj = self.custom_agg[agg_method] - window = 90.0 - self.assertRaises(aggregates.CustomAggFailure, - agg_obj.retrieve_data, - self.storage, metric, - start=None, stop=None, - window=window) - - window = 120.0 - result = pandas.Series() - grain, result = agg_obj.retrieve_data(self.storage, metric, - start=None, stop=None, - window=window) - self.assertEqual(60.0, grain) - self.assertEqual(39.0, result[datetime.datetime(2014, 1, 1, 12)]) - self.assertEqual(25.5, - result[datetime.datetime(2014, 1, 1, 12, 1)]) - self.storage.delete_metric(metric) - - def test_compute_moving_average(self): - metric = self._test_create_metric_and_data([69, 42, 6, 44, 7], - spacing=20) - agg_obj = self.custom_agg['moving-average'] - window = '120s' - - center = 'False' - result = agg_obj.compute(self.storage, metric, - start=None, stop=None, - window=window, center=center) - 
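# Worked arithmetic for the expected value below: the medium policy first yields
# one-minute means, mean(69, 42, 6) = 39.0 at 12:00 and mean(44, 7) = 25.5 at 12:01
# (the same numbers test_retrieve_data checks above); the uncentered 120 s moving
# average of those two points is (39.0 + 25.5) / 2 = 32.25.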
expected = [(utils.datetime_utc(2014, 1, 1, 12), 120.0, 32.25)] - self.assertEqual(expected, result) - - center = 'True' - result = agg_obj.compute(self.storage, metric, - start=None, stop=None, - window=window, center=center) - - expected = [(utils.datetime_utc(2014, 1, 1, 12, 1), 120.0, 28.875)] - self.assertEqual(expected, result) - # (FIXME) atmalagon: doing a centered average when - # there are only two points in the retrieved data seems weird. - # better to raise an error or return nan in this case? - - self.storage.delete_metric(metric) diff --git a/gnocchi/tests/test_archive_policy.py b/gnocchi/tests/test_archive_policy.py deleted file mode 100644 index 3b2afb08a..000000000 --- a/gnocchi/tests/test_archive_policy.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslotest import base - -from gnocchi import archive_policy -from gnocchi import service - - -class TestArchivePolicy(base.BaseTestCase): - - def test_several_equal_granularities(self): - self.assertRaises(ValueError, - archive_policy.ArchivePolicy, - "foobar", - 0, - [(10, 12), (20, 30), (20, 30)], - ["*"]) - - def test_aggregation_methods(self): - conf = service.prepare_service([], - default_config_files=[]) - - ap = archive_policy.ArchivePolicy("foobar", - 0, - [], - ["*"]) - self.assertEqual( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS, - ap.aggregation_methods) - - ap = archive_policy.ArchivePolicy("foobar", - 0, - [], - ["last"]) - self.assertEqual( - set(["last"]), - ap.aggregation_methods) - - ap = archive_policy.ArchivePolicy("foobar", - 0, - [], - ["*", "-mean"]) - self.assertEqual( - (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS - - set(["mean"])), - ap.aggregation_methods) - - ap = archive_policy.ArchivePolicy("foobar", - 0, - [], - ["-mean", "-last"]) - self.assertEqual( - (set(conf.archive_policy.default_aggregation_methods) - - set(["mean", "last"])), - ap.aggregation_methods) - - ap = archive_policy.ArchivePolicy("foobar", - 0, - [], - ["+12pct"]) - self.assertEqual( - (set(conf.archive_policy.default_aggregation_methods) - .union(set(["12pct"]))), - ap.aggregation_methods) - - def test_max_block_size(self): - ap = archive_policy.ArchivePolicy("foobar", - 0, - [(20, 60), (10, 300), (10, 5)], - ["-mean", "-last"]) - self.assertEqual(ap.max_block_size, 300) - - -class TestArchivePolicyItem(base.BaseTestCase): - def test_zero_size(self): - self.assertRaises(ValueError, - archive_policy.ArchivePolicyItem, - 0, 1) - self.assertRaises(ValueError, - archive_policy.ArchivePolicyItem, - 1, 0) - self.assertRaises(ValueError, - archive_policy.ArchivePolicyItem, - -1, 1) - self.assertRaises(ValueError, - archive_policy.ArchivePolicyItem, - 1, -1) diff --git a/gnocchi/tests/test_bin.py b/gnocchi/tests/test_bin.py deleted file mode 100644 index e70bb8654..000000000 --- a/gnocchi/tests/test_bin.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2017 Red Hat, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import subprocess - -from oslotest import base - - -class BinTestCase(base.BaseTestCase): - def test_gnocchi_config_generator_run(self): - subp = subprocess.Popen(['gnocchi-config-generator']) - self.assertEqual(0, subp.wait()) diff --git a/gnocchi/tests/test_carbonara.py b/gnocchi/tests/test_carbonara.py deleted file mode 100644 index 82ec819a9..000000000 --- a/gnocchi/tests/test_carbonara.py +++ /dev/null @@ -1,1292 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2016 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import functools -import math - -import fixtures -import iso8601 -from oslotest import base -import pandas -import six - -from gnocchi import carbonara - - -class TestBoundTimeSerie(base.BaseTestCase): - def test_benchmark(self): - self.useFixture(fixtures.Timeout(300, gentle=True)) - carbonara.BoundTimeSerie.benchmark() - - @staticmethod - def test_base(): - carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - - def test_block_size(self): - ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], - block_size='5s') - self.assertEqual(1, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)]) - self.assertEqual(2, len(ts)) - - def test_block_size_back_window(self): - ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6], - block_size='5s', - back_window=1) - self.assertEqual(3, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 11), 4)]) - self.assertEqual(3, len(ts)) - - def test_block_size_unordered(self): - ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 5), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [10, 5, 23], - block_size='5s') - self.assertEqual(2, len(ts)) - ts.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 11), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 10), 4)]) - self.assertEqual(2, len(ts)) - - def test_duplicate_timestamps(self): - ts = carbonara.BoundTimeSerie.from_data( - [datetime.datetime(2014, 
1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [10, 23]) - self.assertEqual(2, len(ts)) - self.assertEqual(10.0, ts[0]) - self.assertEqual(23.0, ts[1]) - - ts.set_values([(datetime.datetime(2014, 1, 1, 13, 0, 10), 3), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 9), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 8), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 7), - (datetime.datetime(2014, 1, 1, 13, 0, 11), 4)]) - self.assertEqual(4, len(ts)) - self.assertEqual(10.0, ts[0]) - self.assertEqual(23.0, ts[1]) - self.assertEqual(3.0, ts[2]) - self.assertEqual(4.0, ts[3]) - - -class TestAggregatedTimeSerie(base.BaseTestCase): - @staticmethod - def test_base(): - carbonara.AggregatedTimeSerie.from_data( - 3, 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - carbonara.AggregatedTimeSerie.from_data( - "4s", 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - - def test_benchmark(self): - self.useFixture(fixtures.Timeout(300, gentle=True)) - carbonara.AggregatedTimeSerie.benchmark() - - def test_fetch_basic(self): - ts = carbonara.AggregatedTimeSerie.from_data( - timestamps=[datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - aggregation_method='mean', - values=[3, 5, 6], - sampling="1s") - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12), 1, 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch()) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch(from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 4))) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( - from_timestamp=iso8601.parse_date( - "2014-01-01 12:00:04"))) - self.assertEqual( - [(datetime.datetime(2014, 1, 1, 12, 0, 4), 1, 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 1, 6)], - ts.fetch( - from_timestamp=iso8601.parse_date( - "2014-01-01 13:00:04+01:00"))) - - def test_before_epoch(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(1950, 1, 1, 12), 3), - (datetime.datetime(2014, 1, 1, 12), 5), - (datetime.datetime(2014, 1, 1, 12), 6)]) - - self.assertRaises(carbonara.BeforeEpochError, - ts.group_serie, 60) - - @staticmethod - def _resample(ts, sampling, agg, max_size=None): - grouped = ts.group_serie(sampling) - return carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, sampling, agg, max_size=max_size) - - def test_74_percentile_serialized(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '74pct') - - self.assertEqual(1, len(ts)) - self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - - # Serialize and unserialize - key = ts.get_split_key() - o, s = ts.serialize(key) - saved_ts = carbonara.AggregatedTimeSerie.unserialize( - s, key, '74pct', ts.sampling) - - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '74pct') - 
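# Sanity check on 5.48: with linear interpolation, the 74th percentile of
# [3, 5, 6] sits at rank 0.74 * (3 - 1) = 1.48, i.e. 5 + 0.48 * (6 - 5) = 5.48;
# merging the round-tripped copy below must leave that value unchanged.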
ts.merge(saved_ts) - - self.assertEqual(1, len(ts)) - self.assertEqual(5.48, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - - def test_95_percentile(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 6)]) - ts = self._resample(ts, 60, '95pct') - - self.assertEqual(1, len(ts)) - self.assertEqual(5.9000000000000004, - ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - - def _do_test_aggregation(self, name, v1, v2): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 8), - (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) - ts = self._resample(ts, 60, name) - - self.assertEqual(2, len(ts)) - self.assertEqual(v1, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - self.assertEqual(v2, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) - - def test_aggregation_first(self): - self._do_test_aggregation('first', 3, 8) - - def test_aggregation_last(self): - self._do_test_aggregation('last', 5, 9) - - def test_aggregation_count(self): - self._do_test_aggregation('count', 3, 2) - - def test_aggregation_sum(self): - self._do_test_aggregation('sum', 14, 17) - - def test_aggregation_mean(self): - self._do_test_aggregation('mean', 4.666666666666667, 8.5) - - def test_aggregation_median(self): - self._do_test_aggregation('median', 5.0, 8.5) - - def test_aggregation_min(self): - self._do_test_aggregation('min', 3, 8) - - def test_aggregation_max(self): - self._do_test_aggregation('max', 6, 9) - - def test_aggregation_std(self): - self._do_test_aggregation('std', 1.5275252316519465, - 0.70710678118654757) - - def test_aggregation_std_with_unique(self): - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3)]) - ts = self._resample(ts, 60, 'std') - self.assertEqual(0, len(ts), ts.ts.values) - - ts = carbonara.TimeSerie.from_tuples( - [(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 4), 6), - (datetime.datetime(2014, 1, 1, 12, 0, 9), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 6), 9)]) - ts = self._resample(ts, 60, "std") - - self.assertEqual(1, len(ts)) - self.assertEqual(1.5275252316519465, - ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - - def test_different_length_in_timestamps_and_data(self): - self.assertRaises(ValueError, - carbonara.AggregatedTimeSerie.from_data, - 3, 'mean', - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5]) - - def test_max_size(self): - ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 6]) - ts = self._resample(ts, 1, 'mean', max_size=2) - - self.assertEqual(2, len(ts)) - self.assertEqual(5, ts[0]) - self.assertEqual(6, ts[1]) - - def test_down_sampling(self): - ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9)], - [3, 5, 7]) - ts = self._resample(ts, 300, 'mean') - - self.assertEqual(1, len(ts)) - self.assertEqual(5, ts[datetime.datetime(2014, 1, 1, 12, 0, 0)]) - - def test_down_sampling_with_max_size(self): - ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - 
datetime.datetime(2014, 1, 1, 12, 1, 4), - datetime.datetime(2014, 1, 1, 12, 1, 9), - datetime.datetime(2014, 1, 1, 12, 2, 12)], - [3, 5, 7, 1]) - ts = self._resample(ts, 60, 'mean', max_size=2) - - self.assertEqual(2, len(ts)) - self.assertEqual(6, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) - self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) - - def test_down_sampling_with_max_size_and_method_max(self): - ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 1, 4), - datetime.datetime(2014, 1, 1, 12, 1, 9), - datetime.datetime(2014, 1, 1, 12, 2, 12)], - [3, 5, 70, 1]) - ts = self._resample(ts, 60, 'max', max_size=2) - - self.assertEqual(2, len(ts)) - self.assertEqual(70, ts[datetime.datetime(2014, 1, 1, 12, 1, 0)]) - self.assertEqual(1, ts[datetime.datetime(2014, 1, 1, 12, 2, 0)]) - - @staticmethod - def _resample_and_merge(ts, agg_dict): - """Helper method that mimics _add_measures workflow.""" - grouped = ts.group_serie(agg_dict['sampling']) - existing = agg_dict.get('return') - agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, agg_dict['sampling'], agg_dict['agg'], - max_size=agg_dict.get('size')) - if existing: - agg_dict['return'].merge(existing) - - def test_aggregated_different_archive_no_overlap(self): - tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 50, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 11, 46, 4), 4)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - tsb2.set_values([(datetime.datetime(2014, 1, 1, 9, 1, 4), 4)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - dtfrom = datetime.datetime(2014, 1, 1, 11, 0, 0) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, aggregation='mean') - - def test_aggregated_different_archive_no_overlap2(self): - tsc1 = {'sampling': 60, 'size': 50, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = carbonara.AggregatedTimeSerie(sampling=60, max_size=50, - aggregation_method='mean') - - tsb1.set_values([(datetime.datetime(2014, 1, 1, 12, 3, 0), 4)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2], aggregation='mean') - - def test_aggregated_different_archive_overlap(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - # NOTE(sileht): minute 8 is missing in both and - # minute 7 in tsc2 too, but it looks like we have - # enough point to do the aggregation - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 10), - 
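# (minute 8 intentionally has no sample here; see the NOTE above)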
(datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 1, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 9, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 11, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 12, 0), 2), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - dtfrom = datetime.datetime(2014, 1, 1, 12, 0, 0) - dtto = datetime.datetime(2014, 1, 1, 12, 10, 0) - - # By default we require 100% of point that overlap - # so that fail - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, - to_timestamp=dtto, aggregation='mean') - - # Retry with 80% and it works - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, to_timestamp=dtto, - aggregation='mean', needed_percent_of_overlap=80.0) - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 3, 0 - ), 60.0, 4.0), - (datetime.datetime( - 2014, 1, 1, 12, 4, 0 - ), 60.0, 4.0), - (datetime.datetime( - 2014, 1, 1, 12, 5, 0 - ), 60.0, 3.0), - (datetime.datetime( - 2014, 1, 1, 12, 6, 0 - ), 60.0, 5.0), - (datetime.datetime( - 2014, 1, 1, 12, 7, 0 - ), 60.0, 10.0), - (datetime.datetime( - 2014, 1, 1, 12, 9, 0 - ), 60.0, 2.0), - ], output) - - def test_aggregated_different_archive_overlap_edge_missing1(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - # By default we require 100% of point that overlap - # but we allow that the last datapoint is missing - # of the precisest granularity - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='sum') - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 3, 0 - ), 60.0, 33.0), - (datetime.datetime( - 2014, 1, 1, 12, 4, 0 - ), 60.0, 5.0), - (datetime.datetime( - 2014, 1, 1, 12, 5, 0 - ), 60.0, 18.0), - (datetime.datetime( - 2014, 1, 1, 12, 6, 0 - ), 60.0, 19.0), - ], output) - - def 
test_aggregated_different_archive_overlap_edge_missing2(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 0, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], aggregation='mean') - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 3, 0 - ), 60.0, 4.0), - ], output) - - def test_fetch(self): - ts = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 8), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 50), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 4), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 4), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 6), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0, 2), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 6), 5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.0), - (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.0), - (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 11, 58), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts['return'].fetch()) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 8.0), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 4.0) - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) - - def test_aggregated_some_overlap_with_fill_zero(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 
6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill=0) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), - ], output) - - def test_aggregated_some_overlap_with_fill_null(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 7), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 16), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 12), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill='null') - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 2.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 13.0), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 5, 0), 60.0, 9.0), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 60.0, 9.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 3.0), - ], output) - - def test_aggregate_no_points_with_fill_zero(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 12, 3, 0), 9), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 1), - 
(datetime.datetime(2014, 1, 1, 12, 7, 0), 5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 3), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 2), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 13), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 24), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated([ - tsc1['return'], tsc2['return']], aggregation='mean', fill=0) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 12, 0, 0), 60.0, 3.0), - (datetime.datetime(2014, 1, 1, 12, 1, 0), 60.0, 1.0), - (datetime.datetime(2014, 1, 1, 12, 2, 0), 60.0, 6.5), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 60.0, 16.5), - (datetime.datetime(2014, 1, 1, 12, 4, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 7, 0), 60.0, 2.5), - (datetime.datetime(2014, 1, 1, 12, 8, 0), 60.0, 1.5), - ], output) - - def test_fetch_agg_pct(self): - ts = {'sampling': 1, 'size': 3600 * 24, 'agg': '90pct'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 0, 123), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 2), 4)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) - reference = [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 0 - ), 1.0, 3.9), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 4) - ] - - self.assertEqual(len(reference), len(result)) - - for ref, res in zip(reference, result): - self.assertEqual(ref[0], res[0]) - self.assertEqual(ref[1], res[1]) - # Rounding \o/ - self.assertAlmostEqual(ref[2], res[2]) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 2, 113), 110)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - result = ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0)) - reference = [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 0 - ), 1.0, 3.9), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 99.4) - ] - - self.assertEqual(len(reference), len(result)) - - for ref, res in zip(reference, result): - self.assertEqual(ref[0], res[0]) - self.assertEqual(ref[1], res[1]) - # Rounding \o/ - self.assertAlmostEqual(ref[2], res[2]) - - def test_fetch_nano(self): - ts = {'sampling': 0.2, 'size': 10, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 0, 200123), 4), - (datetime.datetime(2014, 1, 1, 11, 46, 0, 340000), 8), - (datetime.datetime(2014, 1, 1, 11, 47, 0, 323154), 50), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 590903), 4), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 903291), 4), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 11, 48, 0, 821312), 5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 46, 0, 200000), 0.2, 6.0), - (datetime.datetime(2014, 1, 1, 11, 47, 0, 200000), 0.2, 50.0), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 400000), 0.2, 4.0), - (datetime.datetime(2014, 1, 1, 11, 48, 0, 800000), 0.2, 4.5) - ], ts['return'].fetch()) - - 
def test_fetch_agg_std(self): - # NOTE (gordc): this is a good test to ensure we drop NaN entries - # 2014-01-01 12:00:00 will appear if we don't dropna() - ts = {'sampling': 60, 'size': 60, 'agg': 'std'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 2.1213203435596424), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 9.8994949366116654), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 2.1213203435596424), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 59.304300012730948), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) - - def test_fetch_agg_max(self): - ts = {'sampling': 60, 'size': 60, 'agg': 'max'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 0, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 0, 0 - ), 60.0, 3), - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 7), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 15), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) - - tsb.set_values([(datetime.datetime(2014, 1, 1, 12, 2, 13), 110)], - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual([ - (datetime.datetime( - 2014, 1, 1, 12, 0, 0 - ), 60.0, 3), - (datetime.datetime( - 2014, 1, 1, 12, 1, 0 - ), 60.0, 7), - (datetime.datetime( - 2014, 1, 1, 12, 2, 0 - ), 60.0, 110), - ], ts['return'].fetch(datetime.datetime(2014, 1, 1, 12, 0, 0))) - - def test_serialize(self): - ts = {'sampling': 0.5, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 0, 1234), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 0, 321), 6), - (datetime.datetime(2014, 1, 1, 12, 1, 4, 234), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 9, 32), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 12, 532), 1), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - key = ts['return'].get_split_key() - o, s = ts['return'].serialize(key) - self.assertEqual(ts['return'], - carbonara.AggregatedTimeSerie.unserialize( - s, key, - 'mean', 0.5)) - - def test_no_truncation(self): - ts = {'sampling': 60, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie() - - for i in six.moves.range(1, 11): - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, i, i), float(i)) - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, i, i + 1), float(i + 1)) - ], 
before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - self.assertEqual(i, len(ts['return'].fetch())) - - def test_back_window(self): - """Back window testing. - - Test that the back window on an archive is not longer than the window - we aggregate on. - """ - ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), - (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual( - [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), - ], - ts['return'].fetch()) - - try: - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - ]) - except carbonara.NoDeloreanAvailable as e: - self.assertEqual( - six.text_type(e), - u"2014-01-01 12:00:02.000099 is before 2014-01-01 12:00:03") - self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 2, 99), - e.bad_timestamp) - self.assertEqual(datetime.datetime(2014, 1, 1, 12, 0, 3), - e.first_timestamp) - else: - self.fail("No exception raised") - - def test_back_window_ignore(self): - """Back window testing. - - Test that the back window on an archive is not longer than the window - we aggregate on. - """ - ts = {'sampling': 1, 'size': 60, 'agg': 'mean'} - tsb = carbonara.BoundTimeSerie(block_size=ts['sampling']) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 1, 2300), 1), - (datetime.datetime(2014, 1, 1, 12, 0, 1, 4600), 2), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 4500), 3), - (datetime.datetime(2014, 1, 1, 12, 0, 2, 7800), 4), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 8), 2.5), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual( - [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), - ], - ts['return'].fetch()) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - ], ignore_too_old_timestamps=True, - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual( - [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 3 - ), 1.0, 2.5), - ], - ts['return'].fetch()) - - tsb.set_values([ - (datetime.datetime(2014, 1, 1, 12, 0, 2, 99), 9), - (datetime.datetime(2014, 1, 1, 12, 0, 3, 9), 4.5), - ], ignore_too_old_timestamps=True, - before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=ts)) - - self.assertEqual( - [ - (datetime.datetime( - 2014, 1, 1, 12, 0, 1 - ), 1.0, 1.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 2 - ), 1.0, 3.5), - (datetime.datetime( - 2014, 1, 1, 12, 0, 3 - ), 1.0, 3.5), - ], - ts['return'].fetch()) - - def test_aggregated_nominal(self): - tsc1 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsc12 = {'sampling': 300, 'size': 6, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc12['sampling']) - tsc2 = {'sampling': 60, 'size': 10, 'agg': 'mean'} - tsc22 = {'sampling':
300, 'size': 6, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc22['sampling']) - - def ts1_update(ts): - grouped = ts.group_serie(tsc1['sampling']) - existing = tsc1.get('return') - tsc1['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc1['sampling'], tsc1['agg'], - max_size=tsc1['size']) - if existing: - tsc1['return'].merge(existing) - grouped = ts.group_serie(tsc12['sampling']) - existing = tsc12.get('return') - tsc12['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc12['sampling'], tsc12['agg'], - max_size=tsc12['size']) - if existing: - tsc12['return'].merge(existing) - - def ts2_update(ts): - grouped = ts.group_serie(tsc2['sampling']) - existing = tsc2.get('return') - tsc2['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc2['sampling'], tsc2['agg'], - max_size=tsc2['size']) - if existing: - tsc2['return'].merge(existing) - grouped = ts.group_serie(tsc22['sampling']) - existing = tsc22.get('return') - tsc22['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie( - grouped, tsc22['sampling'], tsc22['agg'], - max_size=tsc22['size']) - if existing: - tsc22['return'].merge(existing) - - tsb1.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 4), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 8), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 50), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 4), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 4), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 6), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 4), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 3), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 7), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 15), - (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 3), - ], before_truncate_callback=ts1_update) - - tsb2.set_values([ - (datetime.datetime(2014, 1, 1, 11, 46, 4), 6), - (datetime.datetime(2014, 1, 1, 11, 47, 34), 5), - (datetime.datetime(2014, 1, 1, 11, 50, 54), 51), - (datetime.datetime(2014, 1, 1, 11, 54, 45), 5), - (datetime.datetime(2014, 1, 1, 11, 56, 49), 5), - (datetime.datetime(2014, 1, 1, 11, 57, 22), 7), - (datetime.datetime(2014, 1, 1, 11, 58, 22), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 4), 5), - (datetime.datetime(2014, 1, 1, 12, 1, 9), 8), - (datetime.datetime(2014, 1, 1, 12, 2, 1), 10), - (datetime.datetime(2014, 1, 1, 12, 2, 12), 2), - (datetime.datetime(2014, 1, 1, 12, 3, 0), 6), - (datetime.datetime(2014, 1, 1, 12, 4, 9), 4), - (datetime.datetime(2014, 1, 1, 12, 5, 1), 10), - (datetime.datetime(2014, 1, 1, 12, 5, 12), 1), - (datetime.datetime(2014, 1, 1, 12, 6, 0), 1), - ], before_truncate_callback=ts2_update) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc12['return'], tsc2['return'], tsc22['return']], - 'mean') - self.assertEqual([ - (datetime.datetime(2014, 1, 1, 11, 45), 300.0, 5.75), - (datetime.datetime(2014, 1, 1, 11, 50), 300.0, 27.5), - (datetime.datetime(2014, 1, 1, 11, 55), 300.0, 5.3333333333333339), - (datetime.datetime(2014, 1, 1, 12, 0), 300.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 5), 300.0, 5.1666666666666661), - (datetime.datetime(2014, 1, 1, 11, 54), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 11, 56), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 11, 57), 60.0, 6.5), - (datetime.datetime(2014, 1, 
1, 11, 58), 60.0, 5.0), - (datetime.datetime(2014, 1, 1, 12, 1), 60.0, 6.0), - (datetime.datetime(2014, 1, 1, 12, 2), 60.0, 7.0), - (datetime.datetime(2014, 1, 1, 12, 3), 60.0, 4.5), - (datetime.datetime(2014, 1, 1, 12, 4), 60.0, 5.5), - (datetime.datetime(2014, 1, 1, 12, 5), 60.0, 6.75), - (datetime.datetime(2014, 1, 1, 12, 6), 60.0, 2.0), - ], output) - - def test_aggregated_partial_overlap(self): - tsc1 = {'sampling': 1, 'size': 86400, 'agg': 'mean'} - tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling']) - tsc2 = {'sampling': 1, 'size': 60, 'agg': 'mean'} - tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling']) - - tsb1.set_values([ - (datetime.datetime(2015, 12, 3, 13, 19, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 20, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 21, 15), 1), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 1), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc1)) - - tsb2.set_values([ - (datetime.datetime(2015, 12, 3, 13, 21, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 22, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 23, 15), 10), - (datetime.datetime(2015, 12, 3, 13, 24, 15), 10), - ], before_truncate_callback=functools.partial( - self._resample_and_merge, agg_dict=tsc2)) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], aggregation="sum") - - self.assertEqual([ - (datetime.datetime( - 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - ], output) - - dtfrom = datetime.datetime(2015, 12, 3, 13, 17, 0) - dtto = datetime.datetime(2015, 12, 3, 13, 25, 0) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, to_timestamp=dtto, - aggregation="sum", needed_percent_of_overlap=0) - - self.assertEqual([ - (datetime.datetime( - 2015, 12, 3, 13, 19, 15 - ), 1.0, 1.0), - (datetime.datetime( - 2015, 12, 3, 13, 20, 15 - ), 1.0, 1.0), - (datetime.datetime( - 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015, 12, 3, 13, 23, 15 - ), 1.0, 10.0), - (datetime.datetime( - 2015, 12, 3, 13, 24, 15 - ), 1.0, 10.0), - ], output) - - # By default we require 100% of points that overlap, - # so this fails if from or to is set - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - to_timestamp=dtto, aggregation='mean') - self.assertRaises(carbonara.UnAggregableTimeseries, - carbonara.AggregatedTimeSerie.aggregated, - [tsc1['return'], tsc2['return']], - from_timestamp=dtfrom, aggregation='mean') - - # Retry with 50% and it works - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], from_timestamp=dtfrom, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - (datetime.datetime( - 2015, 12, 3, 13, 19, 15 - ), 1.0, 1.0), - (datetime.datetime( - 2015, 12, 3, 13, 20, 15 - ), 1.0, 1.0), - (datetime.datetime( - 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - ], output) - - output = carbonara.AggregatedTimeSerie.aggregated( - [tsc1['return'], tsc2['return']], to_timestamp=dtto, - aggregation="sum", - needed_percent_of_overlap=50.0) - self.assertEqual([ - (datetime.datetime( - 2015, 12, 3, 13, 21, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015, 12, 3, 13, 22, 15 - ), 1.0, 11.0), - (datetime.datetime( - 2015,
12, 3, 13, 23, 15 - ), 1.0, 10.0), - (datetime.datetime( - 2015, 12, 3, 13, 24, 15 - ), 1.0, 10.0), - ], output) - - def test_split_key(self): - self.assertEqual( - datetime.datetime(2014, 10, 7), - carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600).as_datetime()) - self.assertEqual( - datetime.datetime(2014, 12, 31, 18), - carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58).as_datetime()) - self.assertEqual( - 1420048800.0, - float(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 58))) - - key = carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600) - - self.assertGreater(key, pandas.Timestamp(0)) - - self.assertGreaterEqual(key, pandas.Timestamp(0)) - - def test_split_key_next(self): - self.assertEqual( - datetime.datetime(2015, 3, 6), - next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600)).as_datetime()) - self.assertEqual( - datetime.datetime(2015, 8, 3), - next(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600))).as_datetime()) - self.assertEqual( - 113529600000.0, - float(next(carbonara.SplitKey.from_timestamp_and_sampling( - datetime.datetime(2015, 1, 1, 15, 3), 3600 * 24 * 365)))) - - def test_split(self): - sampling = 5 - points = 100000 - ts = carbonara.TimeSerie.from_data( - timestamps=map(datetime.datetime.utcfromtimestamp, - six.moves.range(points)), - values=six.moves.range(points)) - agg = self._resample(ts, sampling, 'mean') - - grouped_points = list(agg.split()) - - self.assertEqual( - math.ceil((points / float(sampling)) - / carbonara.SplitKey.POINTS_PER_SPLIT), - len(grouped_points)) - self.assertEqual("0.0", - str(carbonara.SplitKey(grouped_points[0][0], 0))) - # 3600 × 5s = 5 hours - self.assertEqual(datetime.datetime(1970, 1, 1, 5), - grouped_points[1][0].as_datetime()) - self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT, - len(grouped_points[0][1])) - - def test_from_timeseries(self): - sampling = 5 - points = 100000 - ts = carbonara.TimeSerie.from_data( - timestamps=map(datetime.datetime.utcfromtimestamp, - six.moves.range(points)), - values=six.moves.range(points)) - agg = self._resample(ts, sampling, 'mean') - - split = [t[1] for t in list(agg.split())] - - self.assertEqual(agg, - carbonara.AggregatedTimeSerie.from_timeseries( - split, - sampling=agg.sampling, - max_size=agg.max_size, - aggregation_method=agg.aggregation_method)) - - def test_resample(self): - ts = carbonara.TimeSerie.from_data( - [datetime.datetime(2014, 1, 1, 12, 0, 0), - datetime.datetime(2014, 1, 1, 12, 0, 4), - datetime.datetime(2014, 1, 1, 12, 0, 9), - datetime.datetime(2014, 1, 1, 12, 0, 11), - datetime.datetime(2014, 1, 1, 12, 0, 12)], - [3, 5, 6, 2, 4]) - agg_ts = self._resample(ts, 5, 'mean') - self.assertEqual(3, len(agg_ts)) - - agg_ts = agg_ts.resample(10) - self.assertEqual(2, len(agg_ts)) - self.assertEqual(5, agg_ts[0]) - self.assertEqual(3, agg_ts[1]) diff --git a/gnocchi/tests/test_indexer.py b/gnocchi/tests/test_indexer.py deleted file mode 100644 index f6a292639..000000000 --- a/gnocchi/tests/test_indexer.py +++ /dev/null @@ -1,1245 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import operator -import uuid - -import mock - -from gnocchi import archive_policy -from gnocchi import indexer -from gnocchi.tests import base as tests_base -from gnocchi import utils - - -class MockException(Exception): - pass - - -class TestIndexer(tests_base.TestCase): - def test_get_driver(self): - driver = indexer.get_driver(self.conf) - self.assertIsInstance(driver, indexer.IndexerDriver) - - -class TestIndexerDriver(tests_base.TestCase): - - def test_create_archive_policy_already_exists(self): - # NOTE(jd) This archive policy - # is created by gnocchi.tests on setUp() :) - self.assertRaises(indexer.ArchivePolicyAlreadyExists, - self.index.create_archive_policy, - archive_policy.ArchivePolicy("high", 0, {})) - - def test_get_archive_policy(self): - ap = self.index.get_archive_policy("low") - self.assertEqual({ - 'back_window': 0, - 'aggregation_methods': - set(self.conf.archive_policy.default_aggregation_methods), - 'definition': [ - {u'granularity': 300, u'points': 12, u'timespan': 3600}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], - 'name': u'low'}, dict(ap)) - - def test_update_archive_policy(self): - self.assertRaises(indexer.UnsupportedArchivePolicyChange, - self.index.update_archive_policy, "low", - [archive_policy.ArchivePolicyItem(granularity=300, - points=10)]) - self.assertRaises(indexer.UnsupportedArchivePolicyChange, - self.index.update_archive_policy, "low", - [archive_policy.ArchivePolicyItem(granularity=300, - points=12), - archive_policy.ArchivePolicyItem(granularity=3600, - points=12), - archive_policy.ArchivePolicyItem(granularity=5, - points=6)]) - apname = str(uuid.uuid4()) - self.index.create_archive_policy(archive_policy.ArchivePolicy( - apname, 0, [(12, 300), (24, 3600), (30, 86400)])) - ap = self.index.update_archive_policy( - apname, [archive_policy.ArchivePolicyItem(granularity=300, - points=6), - archive_policy.ArchivePolicyItem(granularity=3600, - points=24), - archive_policy.ArchivePolicyItem(granularity=86400, - points=30)]) - self.assertEqual({ - 'back_window': 0, - 'aggregation_methods': - set(self.conf.archive_policy.default_aggregation_methods), - 'definition': [ - {u'granularity': 300, u'points': 6, u'timespan': 1800}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], - 'name': apname}, dict(ap)) - ap = self.index.update_archive_policy( - apname, [archive_policy.ArchivePolicyItem(granularity=300, - points=12), - archive_policy.ArchivePolicyItem(granularity=3600, - points=24), - archive_policy.ArchivePolicyItem(granularity=86400, - points=30)]) - self.assertEqual({ - 'back_window': 0, - 'aggregation_methods': - set(self.conf.archive_policy.default_aggregation_methods), - 'definition': [ - {u'granularity': 300, u'points': 12, u'timespan': 3600}, - {u'granularity': 3600, u'points': 24, u'timespan': 86400}, - {u'granularity': 86400, u'points': 30, u'timespan': 2592000}], - 'name': apname}, dict(ap)) - - def test_delete_archive_policy(self): - name = str(uuid.uuid4()) - 
self.index.create_archive_policy( - archive_policy.ArchivePolicy(name, 0, {})) - self.index.delete_archive_policy(name) - self.assertRaises(indexer.NoSuchArchivePolicy, - self.index.delete_archive_policy, - name) - self.assertRaises(indexer.NoSuchArchivePolicy, - self.index.delete_archive_policy, - str(uuid.uuid4())) - metric_id = uuid.uuid4() - self.index.create_metric(metric_id, str(uuid.uuid4()), "low") - self.assertRaises(indexer.ArchivePolicyInUse, - self.index.delete_archive_policy, - "low") - self.index.delete_metric(metric_id) - - def test_list_ap_rules_ordered(self): - name = str(uuid.uuid4()) - self.index.create_archive_policy( - archive_policy.ArchivePolicy(name, 0, {})) - self.index.create_archive_policy_rule('rule1', 'abc.*', name) - self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name) - self.index.create_archive_policy_rule('rule3', 'abc.xyz', name) - rules = self.index.list_archive_policy_rules() - # NOTE(jd) The test is not isolated, there might be more than 3 rules - found = 0 - for r in rules: - if r['metric_pattern'] == 'abc.xyz.*': - found = 1 - if found == 1 and r['metric_pattern'] == 'abc.xyz': - found = 2 - if found == 2 and r['metric_pattern'] == 'abc.*': - break - else: - self.fail("Metric patterns are not ordered") - - # Ensure we can't delete the archive policy - self.assertRaises(indexer.ArchivePolicyInUse, - self.index.delete_archive_policy, name) - - def test_create_metric(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - m = self.index.create_metric(r1, creator, "low") - self.assertEqual(r1, m.id) - self.assertEqual(m.creator, creator) - self.assertIsNone(m.name) - self.assertIsNone(m.unit) - self.assertIsNone(m.resource_id) - m2 = self.index.list_metrics(id=r1) - self.assertEqual([m], m2) - - def test_create_named_metric_duplicate(self): - m1 = uuid.uuid4() - r1 = uuid.uuid4() - name = "foobar" - creator = str(uuid.uuid4()) - self.index.create_resource('generic', r1, creator) - m = self.index.create_metric(m1, creator, "low", - name=name, - resource_id=r1) - self.assertEqual(m1, m.id) - self.assertEqual(m.creator, creator) - self.assertEqual(name, m.name) - self.assertEqual(r1, m.resource_id) - m2 = self.index.list_metrics(id=m1) - self.assertEqual([m], m2) - - self.assertRaises(indexer.NamedMetricAlreadyExists, - self.index.create_metric, m1, creator, "low", - name=name, resource_id=r1) - - def test_expunge_metric(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - m = self.index.create_metric(r1, creator, "low") - self.index.delete_metric(m.id) - try: - self.index.expunge_metric(m.id) - except indexer.NoSuchMetric: - # It's possible another test process expunged the metric just - # before us; in that case, we're good, we'll just check that the - # next call actually really raises NoSuchMetric anyway - pass - self.assertRaises(indexer.NoSuchMetric, - self.index.delete_metric, - m.id) - self.assertRaises(indexer.NoSuchMetric, - self.index.expunge_metric, - m.id) - - def test_create_resource(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - rc = self.index.create_resource('generic', r1, creator) - self.assertIsNotNone(rc.started_at) - self.assertIsNotNone(rc.revision_start) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": creator, - "created_by_project_id": "", - "user_id": None, - "project_id": None, - "started_at": rc.started_at, - "ended_at": None, - "original_resource_id": str(r1), - "type": "generic", - "metrics": {}}, - 
rc.jsonify()) - rg = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, rg) - self.assertEqual(rc.metrics, rg.metrics) - - def test_create_resource_with_original_resource_id(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - rc = self.index.create_resource('generic', r1, creator, - original_resource_id="foobar") - self.assertIsNotNone(rc.started_at) - self.assertIsNotNone(rc.revision_start) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": creator, - "created_by_project_id": "", - "user_id": None, - "project_id": None, - "started_at": rc.started_at, - "ended_at": None, - "original_resource_id": "foobar", - "type": "generic", - "metrics": {}}, - rc.jsonify()) - rg = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, rg) - self.assertEqual(rc.metrics, rg.metrics) - - def test_split_user_project_for_legacy_reasons(self): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - creator = user + ":" + project - rc = self.index.create_resource('generic', r1, creator) - self.assertIsNotNone(rc.started_at) - self.assertIsNotNone(rc.revision_start) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": user, - "created_by_project_id": project, - "user_id": None, - "project_id": None, - "started_at": rc.started_at, - "ended_at": None, - "original_resource_id": str(r1), - "type": "generic", - "metrics": {}}, - rc.jsonify()) - rg = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, rg) - self.assertEqual(rc.metrics, rg.metrics) - - def test_create_non_existent_metric(self): - e = uuid.uuid4() - try: - self.index.create_resource( - 'generic', uuid.uuid4(), str(uuid.uuid4()), str(uuid.uuid4()), - metrics={"foo": e}) - except indexer.NoSuchMetric as ex: - self.assertEqual(e, ex.metric) - else: - self.fail("Exception not raised") - - def test_create_resource_already_exists(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_resource('generic', r1, creator) - self.assertRaises(indexer.ResourceAlreadyExists, - self.index.create_resource, - 'generic', r1, creator) - - def test_create_resource_with_new_metrics(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - rc = self.index.create_resource( - 'generic', r1, creator, - metrics={"foobar": {"archive_policy_name": "low"}}) - self.assertEqual(1, len(rc.metrics)) - m = self.index.list_metrics(id=rc.metrics[0].id) - self.assertEqual(m[0], rc.metrics[0]) - - def test_delete_resource(self): - r1 = uuid.uuid4() - self.index.create_resource('generic', r1, str(uuid.uuid4()), - str(uuid.uuid4())) - self.index.delete_resource(r1) - self.assertRaises(indexer.NoSuchResource, - self.index.delete_resource, - r1) - - def test_delete_resource_with_metrics(self): - creator = str(uuid.uuid4()) - e1 = uuid.uuid4() - e2 = uuid.uuid4() - self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, creator, archive_policy_name="low") - r1 = uuid.uuid4() - self.index.create_resource('generic', r1, creator, - metrics={'foo': e1, 'bar': e2}) - self.index.delete_resource(r1) - self.assertRaises(indexer.NoSuchResource, - self.index.delete_resource, - r1) - metrics = self.index.list_metrics(ids=[e1, e2]) - self.assertEqual([], metrics) - - def test_delete_resource_non_existent(self): - r1 = uuid.uuid4() - 
self.assertRaises(indexer.NoSuchResource, - self.index.delete_resource, - r1) - - def test_create_resource_with_start_timestamp(self): - r1 = uuid.uuid4() - ts = utils.datetime_utc(2014, 1, 1, 23, 34, 23, 1234) - creator = str(uuid.uuid4()) - rc = self.index.create_resource('generic', r1, creator, started_at=ts) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": creator, - "created_by_project_id": "", - "user_id": None, - "project_id": None, - "started_at": ts, - "ended_at": None, - "original_resource_id": str(r1), - "type": "generic", - "metrics": {}}, rc.jsonify()) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, r) - - def test_create_resource_with_metrics(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - e2 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, - archive_policy_name="low") - self.index.create_metric(e2, creator, - archive_policy_name="low") - rc = self.index.create_resource('generic', r1, creator, - metrics={'foo': e1, 'bar': e2}) - self.assertIsNotNone(rc.started_at) - self.assertIsNotNone(rc.revision_start) - self.assertEqual({"id": r1, - "revision_start": rc.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": creator, - "created_by_project_id": "", - "user_id": None, - "project_id": None, - "started_at": rc.started_at, - "ended_at": None, - "original_resource_id": str(r1), - "type": "generic", - "metrics": {'foo': str(e1), 'bar': str(e2)}}, - rc.jsonify()) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertIsNotNone(r.started_at) - self.assertEqual({"id": r1, - "revision_start": r.revision_start, - "revision_end": None, - "creator": creator, - "created_by_user_id": creator, - "created_by_project_id": "", - "type": "generic", - "started_at": rc.started_at, - "ended_at": None, - "user_id": None, - "project_id": None, - "original_resource_id": str(r1), - "metrics": {'foo': str(e1), 'bar': str(e2)}}, - r.jsonify()) - - def test_update_non_existent_resource_end_timestamp(self): - r1 = uuid.uuid4() - self.assertRaises( - indexer.NoSuchResource, - self.index.update_resource, - 'generic', - r1, - ended_at=datetime.datetime(2014, 1, 1, 2, 3, 4)) - - def test_update_resource_end_timestamp(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_resource('generic', r1, creator) - self.index.update_resource( - 'generic', - r1, - ended_at=utils.datetime_utc(2043, 1, 1, 2, 3, 4)) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertIsNotNone(r.started_at) - self.assertIsNone(r.user_id) - self.assertIsNone(r.project_id) - self.assertIsNone(r.revision_end) - self.assertIsNotNone(r.revision_start) - self.assertEqual(r1, r.id) - self.assertEqual(creator, r.creator) - self.assertEqual(utils.datetime_utc(2043, 1, 1, 2, 3, 4), r.ended_at) - self.assertEqual("generic", r.type) - self.assertEqual(0, len(r.metrics)) - self.index.update_resource( - 'generic', - r1, - ended_at=None) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertIsNotNone(r.started_at) - self.assertIsNotNone(r.revision_start) - self.assertEqual({"id": r1, - "revision_start": r.revision_start, - "revision_end": None, - "ended_at": None, - "created_by_project_id": "", - "created_by_user_id": creator, - "creator": creator, - "user_id": None, - "project_id": None, - "type": "generic", - "started_at": r.started_at, - "original_resource_id": 
str(r1), - "metrics": {}}, r.jsonify()) - - def test_update_resource_metrics(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - e2 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_resource('generic', r1, creator, metrics={'foo': e1}) - self.index.create_metric(e2, creator, archive_policy_name="low") - rc = self.index.update_resource('generic', r1, metrics={'bar': e2}) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, r) - - def test_update_resource_metrics_append(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - e2 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, - archive_policy_name="low") - self.index.create_metric(e2, creator, - archive_policy_name="low") - self.index.create_resource('generic', r1, creator, - metrics={'foo': e1}) - rc = self.index.update_resource('generic', r1, metrics={'bar': e2}, - append_metrics=True) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(rc, r) - metric_names = [m.name for m in rc.metrics] - self.assertIn('foo', metric_names) - self.assertIn('bar', metric_names) - - def test_update_resource_metrics_append_fail(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - e2 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, - archive_policy_name="low") - self.index.create_metric(e2, creator, - archive_policy_name="low") - self.index.create_resource('generic', r1, creator, - metrics={'foo': e1}) - - self.assertRaises(indexer.NamedMetricAlreadyExists, - self.index.update_resource, - 'generic', r1, metrics={'foo': e2}, - append_metrics=True) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertEqual(e1, r.metrics[0].id) - - def test_update_resource_attribute(self): - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - rtype = mgr.resource_type_from_dict(resource_type, { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, 'creating') - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - # Create - self.index.create_resource_type(rtype) - - rc = self.index.create_resource(resource_type, r1, creator, - col1="foo") - rc = self.index.update_resource(resource_type, r1, col1="foo") - r = self.index.get_resource(resource_type, r1, with_metrics=True) - self.assertEqual(rc, r) - - def test_update_resource_no_change(self): - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - rtype = mgr.resource_type_from_dict(resource_type, { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, 'creating') - self.index.create_resource_type(rtype) - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - rc = self.index.create_resource(resource_type, r1, creator, - col1="foo") - updated = self.index.update_resource(resource_type, r1, col1="foo", - create_revision=False) - r = self.index.list_resources(resource_type, - {"=": {"id": r1}}, - history=True) - self.assertEqual(1, len(r)) - self.assertEqual(dict(rc), dict(r[0])) - self.assertEqual(dict(updated), dict(r[0])) - - def test_update_resource_ended_at_fail(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_resource('generic', r1, creator) - self.assertRaises( - indexer.ResourceValueError, - self.index.update_resource, - 'generic', r1, - ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1)) - - def test_update_resource_unknown_attribute(self): - mgr = 
self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - rtype = mgr.resource_type_from_dict(resource_type, { - "col1": {"type": "string", "required": False, - "min_length": 1, "max_length": 2}, - }, 'creating') - self.index.create_resource_type(rtype) - r1 = uuid.uuid4() - self.index.create_resource(resource_type, r1, - str(uuid.uuid4()), str(uuid.uuid4())) - self.assertRaises(indexer.ResourceAttributeError, - self.index.update_resource, - resource_type, - r1, foo="bar") - - def test_update_non_existent_metric(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - self.index.create_resource('generic', r1, str(uuid.uuid4()), - str(uuid.uuid4())) - self.assertRaises(indexer.NoSuchMetric, - self.index.update_resource, - 'generic', - r1, metrics={'bar': e1}) - - def test_update_non_existent_resource(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - self.index.create_metric(e1, str(uuid.uuid4()), - archive_policy_name="low") - self.assertRaises(indexer.NoSuchResource, - self.index.update_resource, - 'generic', - r1, metrics={'bar': e1}) - - def test_create_resource_with_non_existent_metrics(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - self.assertRaises(indexer.NoSuchMetric, - self.index.create_resource, - 'generic', - r1, str(uuid.uuid4()), str(uuid.uuid4()), - metrics={'foo': e1}) - - def test_delete_metric_on_resource(self): - r1 = uuid.uuid4() - e1 = uuid.uuid4() - e2 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, - archive_policy_name="low") - self.index.create_metric(e2, creator, - archive_policy_name="low") - rc = self.index.create_resource('generic', r1, creator, - metrics={'foo': e1, 'bar': e2}) - self.index.delete_metric(e1) - self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, e1) - r = self.index.get_resource('generic', r1, with_metrics=True) - self.assertIsNotNone(r.started_at) - self.assertIsNotNone(r.revision_start) - self.assertEqual({"id": r1, - "started_at": r.started_at, - "revision_start": rc.revision_start, - "revision_end": None, - "ended_at": None, - "creator": creator, - "created_by_project_id": "", - "created_by_user_id": creator, - "user_id": None, - "project_id": None, - "original_resource_id": str(r1), - "type": "generic", - "metrics": {'bar': str(e2)}}, r.jsonify()) - - def test_delete_resource_custom(self): - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, { - "flavor_id": {"type": "string", - "min_length": 1, - "max_length": 20, - "required": True} - }, 'creating')) - r1 = uuid.uuid4() - created = self.index.create_resource(resource_type, r1, - str(uuid.uuid4()), - str(uuid.uuid4()), - flavor_id="foo") - got = self.index.get_resource(resource_type, r1, with_metrics=True) - self.assertEqual(created, got) - self.index.delete_resource(r1) - got = self.index.get_resource(resource_type, r1) - self.assertIsNone(got) - - def test_list_resources_by_unknown_field(self): - self.assertRaises(indexer.ResourceAttributeError, - self.index.list_resources, - 'generic', - attribute_filter={"=": {"fern": "bar"}}) - - def test_list_resources_by_user(self): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, user + ":" + project, - user, project) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"user_id": user}}) - self.assertEqual(1, len(resources)) - self.assertEqual(g, resources[0]) - resources = 
self.index.list_resources( - 'generic', - attribute_filter={"=": {"user_id": 'bad-user'}}) - self.assertEqual(0, len(resources)) - - def test_list_resources_by_created_by_user_id(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, creator + ":" + creator) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"created_by_user_id": creator}}) - self.assertEqual([g], resources) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"created_by_user_id": 'bad-user'}}) - self.assertEqual([], resources) - - def test_list_resources_by_creator(self): - r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - g = self.index.create_resource('generic', r1, creator) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"creator": creator}}) - self.assertEqual(1, len(resources)) - self.assertEqual(g, resources[0]) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"creator": 'bad-user'}}) - self.assertEqual(0, len(resources)) - - def test_list_resources_by_user_with_details(self): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - creator = user + ":" + project - g = self.index.create_resource('generic', r1, creator, - user, project) - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {}, 'creating')) - r2 = uuid.uuid4() - i = self.index.create_resource(resource_type, r2, creator, - user, project) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"user_id": user}}, - details=True, - ) - self.assertEqual(2, len(resources)) - self.assertIn(g, resources) - self.assertIn(i, resources) - - def test_list_resources_by_project(self): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - creator = user + ":" + project - g = self.index.create_resource('generic', r1, creator, user, project) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"project_id": project}}) - self.assertEqual(1, len(resources)) - self.assertEqual(g, resources[0]) - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"project_id": 'bad-project'}}) - self.assertEqual(0, len(resources)) - - def test_list_resources_by_duration(self): - r1 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - g = self.index.create_resource( - 'generic', r1, user + ":" + project, user, project, - started_at=utils.datetime_utc(2010, 1, 1, 12, 0), - ended_at=utils.datetime_utc(2010, 1, 1, 13, 0)) - resources = self.index.list_resources( - 'generic', - attribute_filter={"and": [ - {"=": {"user_id": user}}, - {">": {"lifespan": 1800}}, - ]}) - self.assertEqual(1, len(resources)) - self.assertEqual(g, resources[0]) - resources = self.index.list_resources( - 'generic', - attribute_filter={"and": [ - {"=": {"project_id": project}}, - {">": {"lifespan": 7200}}, - ]}) - self.assertEqual(0, len(resources)) - - def test_list_resources(self): - # NOTE(jd) So this test is a bit fuzzy right now as we use the same - # database for all tests and the tests are running concurrently, but - # for now it'll be better than nothing.
- r1 = uuid.uuid4() - g = self.index.create_resource('generic', r1, - str(uuid.uuid4()), str(uuid.uuid4())) - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {}, 'creating')) - r2 = uuid.uuid4() - i = self.index.create_resource(resource_type, r2, - str(uuid.uuid4()), str(uuid.uuid4())) - resources = self.index.list_resources('generic') - self.assertGreaterEqual(len(resources), 2) - g_found = False - i_found = False - for r in resources: - if r.id == r1: - self.assertEqual(g, r) - g_found = True - elif r.id == r2: - i_found = True - if i_found and g_found: - break - else: - self.fail("Some resources were not found") - - resources = self.index.list_resources(resource_type) - self.assertGreaterEqual(len(resources), 1) - for r in resources: - if r.id == r2: - self.assertEqual(i, r) - break - else: - self.fail("Some resources were not found") - - def test_list_resource_attribute_type_numeric(self): - """Test that we can pass an integer to filter on a string type.""" - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, { - "flavor_id": {"type": "string", - "min_length": 1, - "max_length": 20, - "required": False}, - }, 'creating')) - r = self.index.list_resources( - resource_type, attribute_filter={"=": {"flavor_id": 1.0}}) - self.assertEqual(0, len(r)) - - def test_list_resource_weird_date(self): - self.assertRaises( - indexer.QueryValueError, - self.index.list_resources, - 'generic', - attribute_filter={"=": {"started_at": "f00bar"}}) - - def test_list_resources_without_history(self): - e = uuid.uuid4() - rid = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - new_user = str(uuid.uuid4()) - new_project = str(uuid.uuid4()) - - self.index.create_metric(e, user + ":" + project, - archive_policy_name="low") - - self.index.create_resource('generic', rid, user + ":" + project, - user, project, - metrics={'foo': e}) - r2 = self.index.update_resource('generic', rid, user_id=new_user, - project_id=new_project, - append_metrics=True).jsonify() - - self.assertEqual({'foo': str(e)}, r2['metrics']) - self.assertEqual(new_user, r2['user_id']) - self.assertEqual(new_project, r2['project_id']) - resources = self.index.list_resources('generic', history=False, - details=True) - self.assertGreaterEqual(len(resources), 1) - expected_resources = [r.jsonify() for r in resources - if r.id == rid] - self.assertIn(r2, expected_resources) - - def test_list_resources_with_history(self): - e1 = uuid.uuid4() - e2 = uuid.uuid4() - rid = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - creator = user + ":" + project - new_user = str(uuid.uuid4()) - new_project = str(uuid.uuid4()) - - self.index.create_metric(e1, creator, archive_policy_name="low") - self.index.create_metric(e2, creator, archive_policy_name="low") - self.index.create_metric(uuid.uuid4(), creator, - archive_policy_name="low") - - r1 = self.index.create_resource('generic', rid, creator, user, project, - metrics={'foo': e1, 'bar': e2} - ).jsonify() - r2 = self.index.update_resource('generic', rid, user_id=new_user, - project_id=new_project, - append_metrics=True).jsonify() - - r1['revision_end'] = r2['revision_start'] - r2['revision_end'] = None - self.assertEqual({'foo': str(e1), - 'bar': str(e2)}, r2['metrics']) - self.assertEqual(new_user, r2['user_id']) - self.assertEqual(new_project, 
r2['project_id']) - resources = self.index.list_resources('generic', history=True, - details=False, - attribute_filter={ - "=": {"id": rid}}) - self.assertGreaterEqual(len(resources), 2) - resources = sorted( - [r.jsonify() for r in resources], - key=operator.itemgetter("revision_start")) - self.assertEqual([r1, r2], resources) - - def test_list_resources_custom_with_history(self): - e1 = uuid.uuid4() - e2 = uuid.uuid4() - rid = uuid.uuid4() - creator = str(uuid.uuid4()) - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - new_user = str(uuid.uuid4()) - new_project = str(uuid.uuid4()) - - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, 'creating')) - - self.index.create_metric(e1, creator, - archive_policy_name="low") - self.index.create_metric(e2, creator, - archive_policy_name="low") - self.index.create_metric(uuid.uuid4(), creator, - archive_policy_name="low") - - r1 = self.index.create_resource(resource_type, rid, creator, - user, project, - col1="foo", - metrics={'foo': e1, 'bar': e2} - ).jsonify() - r2 = self.index.update_resource(resource_type, rid, user_id=new_user, - project_id=new_project, - col1="bar", - append_metrics=True).jsonify() - - r1['revision_end'] = r2['revision_start'] - r2['revision_end'] = None - self.assertEqual({'foo': str(e1), - 'bar': str(e2)}, r2['metrics']) - self.assertEqual(new_user, r2['user_id']) - self.assertEqual(new_project, r2['project_id']) - self.assertEqual('bar', r2['col1']) - resources = self.index.list_resources(resource_type, history=True, - details=False, - attribute_filter={ - "=": {"id": rid}}) - self.assertGreaterEqual(len(resources), 2) - resources = sorted( - [r.jsonify() for r in resources], - key=operator.itemgetter("revision_start")) - self.assertEqual([r1, r2], resources) - - def test_list_resources_started_after_ended_before(self): - # NOTE(jd) So this test is a bit fuzzy right now as we use the same - # database for all tests and the tests are running concurrently, but - # for now it'll be better than nothing.
- r1 = uuid.uuid4() - creator = str(uuid.uuid4()) - g = self.index.create_resource( - 'generic', r1, creator, - started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), - ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23)) - r2 = uuid.uuid4() - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, {}, 'creating')) - i = self.index.create_resource( - resource_type, r2, creator, - started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23), - ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23)) - resources = self.index.list_resources( - 'generic', - attribute_filter={ - "and": - [{">=": {"started_at": - utils.datetime_utc(2000, 1, 1, 23, 23, 23)}}, - {"<": {"ended_at": - utils.datetime_utc(2000, 1, 5, 23, 23, 23)}}]}) - self.assertGreaterEqual(len(resources), 2) - g_found = False - i_found = False - for r in resources: - if r.id == r1: - self.assertEqual(g, r) - g_found = True - elif r.id == r2: - i_found = True - if i_found and g_found: - break - else: - self.fail("Some resources were not found") - - resources = self.index.list_resources( - resource_type, - attribute_filter={ - ">=": { - "started_at": datetime.datetime(2000, 1, 1, 23, 23, 23) - }, - }) - self.assertGreaterEqual(len(resources), 1) - for r in resources: - if r.id == r2: - self.assertEqual(i, r) - break - else: - self.fail("Some resources were not found") - - resources = self.index.list_resources( - 'generic', - attribute_filter={ - "<": { - "ended_at": datetime.datetime(1999, 1, 1, 23, 23, 23) - }, - }) - self.assertEqual(0, len(resources)) - - def test_deletes_resources(self): - r1 = uuid.uuid4() - r2 = uuid.uuid4() - user = str(uuid.uuid4()) - project = str(uuid.uuid4()) - creator = user + ":" + project - metrics = {'foo': {'archive_policy_name': 'medium'}} - g1 = self.index.create_resource('generic', r1, creator, - user, project, metrics=metrics) - g2 = self.index.create_resource('generic', r2, creator, - user, project, metrics=metrics) - - metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], - g2['metrics'][0]['id']]) - self.assertEqual(2, len(metrics)) - for m in metrics: - self.assertEqual('active', m['status']) - - deleted = self.index.delete_resources( - 'generic', - attribute_filter={"=": {"user_id": user}}) - self.assertEqual(2, deleted) - - resources = self.index.list_resources( - 'generic', - attribute_filter={"=": {"user_id": user}}) - self.assertEqual(0, len(resources)) - - metrics = self.index.list_metrics(ids=[g1['metrics'][0]['id'], - g2['metrics'][0]['id']], - status='delete') - self.assertEqual(2, len(metrics)) - for m in metrics: - self.assertEqual('delete', m['status']) - - def test_get_metric(self): - e1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, archive_policy_name="low") - - metric = self.index.list_metrics(id=e1) - self.assertEqual(1, len(metric)) - metric = metric[0] - self.assertEqual(e1, metric.id) - self.assertEqual(metric.creator, creator) - self.assertIsNone(metric.name) - self.assertIsNone(metric.resource_id) - - def test_get_metric_with_details(self): - e1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, - creator, - archive_policy_name="low") - - metric = self.index.list_metrics(id=e1) - self.assertEqual(1, len(metric)) - metric = metric[0] - self.assertEqual(e1, metric.id) - self.assertEqual(metric.creator, creator) - self.assertIsNone(metric.name) - self.assertIsNone(metric.resource_id) - 
self.assertEqual(self.archive_policies['low'], metric.archive_policy) - - def test_get_metric_with_bad_uuid(self): - e1 = uuid.uuid4() - self.assertEqual([], self.index.list_metrics(id=e1)) - - def test_get_metric_empty_list_uuids(self): - self.assertEqual([], self.index.list_metrics(ids=[])) - - def test_list_metrics(self): - e1 = uuid.uuid4() - creator = str(uuid.uuid4()) - self.index.create_metric(e1, creator, archive_policy_name="low") - e2 = uuid.uuid4() - self.index.create_metric(e2, creator, archive_policy_name="low") - metrics = self.index.list_metrics() - id_list = [m.id for m in metrics] - self.assertIn(e1, id_list) - # Test ordering - if e1 < e2: - self.assertLess(id_list.index(e1), id_list.index(e2)) - else: - self.assertLess(id_list.index(e2), id_list.index(e1)) - - def test_list_metrics_delete_status(self): - e1 = uuid.uuid4() - self.index.create_metric(e1, str(uuid.uuid4()), - archive_policy_name="low") - self.index.delete_metric(e1) - metrics = self.index.list_metrics() - self.assertNotIn(e1, [m.id for m in metrics]) - - def test_resource_type_crud(self): - mgr = self.index.get_resource_type_schema() - rtype = mgr.resource_type_from_dict("indexer_test", { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, "creating") - - # Create - self.index.create_resource_type(rtype) - self.assertRaises(indexer.ResourceTypeAlreadyExists, - self.index.create_resource_type, - rtype) - - # Get - rtype = self.index.get_resource_type("indexer_test") - self.assertEqual("indexer_test", rtype.name) - self.assertEqual(1, len(rtype.attributes)) - self.assertEqual("col1", rtype.attributes[0].name) - self.assertEqual("string", rtype.attributes[0].typename) - self.assertEqual(15, rtype.attributes[0].max_length) - self.assertEqual(2, rtype.attributes[0].min_length) - self.assertEqual("active", rtype.state) - - # List - rtypes = self.index.list_resource_types() - for rtype in rtypes: - if rtype.name == "indexer_test": - break - else: - self.fail("indexer_test not found") - - # Test resource itself - rid = uuid.uuid4() - self.index.create_resource("indexer_test", rid, - str(uuid.uuid4()), - str(uuid.uuid4()), - col1="col1_value") - r = self.index.get_resource("indexer_test", rid) - self.assertEqual("indexer_test", r.type) - self.assertEqual("col1_value", r.col1) - - # Update the resource type - add_attrs = mgr.resource_type_from_dict("indexer_test", { - "col2": {"type": "number", "required": False, - "max": 100, "min": 0}, - "col3": {"type": "number", "required": True, - "max": 100, "min": 0, "options": {'fill': 15}} - }, "creating").attributes - self.index.update_resource_type("indexer_test", - add_attributes=add_attrs) - - # Check the new attribute - r = self.index.get_resource("indexer_test", rid) - self.assertIsNone(r.col2) - self.assertEqual(15, r.col3) - - self.index.update_resource("indexer_test", rid, col2=10) - - rl = self.index.list_resources('indexer_test', - {"=": {"id": rid}}, - history=True, - sorts=['revision_start:asc', - 'started_at:asc']) - self.assertEqual(2, len(rl)) - self.assertIsNone(rl[0].col2) - self.assertEqual(10, rl[1].col2) - self.assertEqual(15, rl[0].col3) - self.assertEqual(15, rl[1].col3) - - # Deletion - self.assertRaises(indexer.ResourceTypeInUse, - self.index.delete_resource_type, - "indexer_test") - self.index.delete_resource(rid) - self.index.delete_resource_type("indexer_test") - - # Ensure it's deleted - self.assertRaises(indexer.NoSuchResourceType, - self.index.get_resource_type, - "indexer_test") - - 
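The CRUD test above walks the whole resource-type lifecycle: the schema is submitted in the 'creating' state, reads back as 'active' once its tables exist, refuses deletion while a resource still uses it, and raises NoSuchResourceType once it is gone. A minimal sketch of that flow against the indexer API, assuming an `index` handle configured the same way as in these tests ("example_type" is a hypothetical name):

    import uuid

    mgr = index.get_resource_type_schema()
    rtype = mgr.resource_type_from_dict("example_type", {
        "col1": {"type": "string", "required": True,
                 "min_length": 2, "max_length": 15},
    }, "creating")                      # submitted in the 'creating' state

    index.create_resource_type(rtype)
    assert index.get_resource_type("example_type").state == "active"

    rid = uuid.uuid4()
    index.create_resource("example_type", rid,
                          str(uuid.uuid4()), str(uuid.uuid4()),
                          col1="value")

    # Deleting the type now would raise ResourceTypeInUse; drop the
    # resource first, then the type.
    index.delete_resource(rid)
    index.delete_resource_type("example_type")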
self.assertRaises(indexer.NoSuchResourceType, - self.index.delete_resource_type, - "indexer_test") - - def _get_rt_state(self, name): - return self.index.get_resource_type(name).state - - def test_resource_type_unexpected_creation_error(self): - mgr = self.index.get_resource_type_schema() - rtype = mgr.resource_type_from_dict("indexer_test_fail", { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, "creating") - - states = {'before': None, - 'after': None} - - def map_and_create_mock(rt, conn): - states['before'] = self._get_rt_state("indexer_test_fail") - raise MockException("boom!") - - with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER, - "map_and_create_tables", - side_effect=map_and_create_mock): - self.assertRaises(MockException, - self.index.create_resource_type, - rtype) - states['after'] = self._get_rt_state('indexer_test_fail') - - self.assertEqual([('after', 'creation_error'), - ('before', 'creating')], - sorted(states.items())) - - def test_resource_type_unexpected_deleting_error(self): - mgr = self.index.get_resource_type_schema() - rtype = mgr.resource_type_from_dict("indexer_test_fail2", { - "col1": {"type": "string", "required": True, - "min_length": 2, "max_length": 15} - }, "creating") - self.index.create_resource_type(rtype) - - states = {'before': None, - 'after': None} - - def map_and_create_mock(rt, conn): - states['before'] = self._get_rt_state("indexer_test_fail2") - raise MockException("boom!") - - with mock.patch.object(self.index._RESOURCE_TYPE_MANAGER, - "unmap_and_delete_tables", - side_effect=map_and_create_mock): - self.assertRaises(MockException, - self.index.delete_resource_type, - rtype.name) - states['after'] = self._get_rt_state('indexer_test_fail2') - - self.assertEqual([('after', 'deletion_error'), - ('before', 'deleting')], - sorted(states.items())) - - # We can cleanup the mess ! - self.index.delete_resource_type("indexer_test_fail2") - - # Ensure it's deleted - self.assertRaises(indexer.NoSuchResourceType, - self.index.get_resource_type, - "indexer_test_fail2") - - self.assertRaises(indexer.NoSuchResourceType, - self.index.delete_resource_type, - "indexer_test_fail2") diff --git a/gnocchi/tests/test_rest.py b/gnocchi/tests/test_rest.py deleted file mode 100644 index 9caf9b39f..000000000 --- a/gnocchi/tests/test_rest.py +++ /dev/null @@ -1,1915 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
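The two resource-type failure tests above rely on mock.patch.object with a side_effect callable that records the object's state at the moment of the injected failure, so the test can assert both the intermediate state ('creating'/'deleting') and the terminal error state ('creation_error'/'deletion_error'). The same pattern in isolation, as a self-contained sketch (Service, Boom, and seen are hypothetical names):

    import mock

    class Boom(Exception):
        pass

    class Service(object):
        state = "idle"

        def _do_work(self):
            pass

        def run(self):
            self.state = "running"
            try:
                self._do_work()
                self.state = "done"
            except Exception:
                self.state = "error"
                raise

    svc = Service()
    seen = {}

    def blow_up():
        seen['during'] = svc.state      # observe the intermediate state
        raise Boom()

    with mock.patch.object(Service, "_do_work", side_effect=blow_up):
        try:
            svc.run()
        except Boom:
            pass

    assert seen['during'] == "running" and svc.state == "error"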
-import base64 -import calendar -import contextlib -import datetime -from email import utils as email_utils -import hashlib -import json -import uuid - -import iso8601 -from keystonemiddleware import fixture as ksm_fixture -import mock -import six -from stevedore import extension -import testscenarios -from testtools import testcase -import webtest - -from gnocchi import archive_policy -from gnocchi import rest -from gnocchi.rest import app -from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils -from gnocchi import utils - - -load_tests = testscenarios.load_tests_apply_scenarios - - -class TestingApp(webtest.TestApp): - VALID_TOKEN_ADMIN = str(uuid.uuid4()) - USER_ID_ADMIN = str(uuid.uuid4()) - PROJECT_ID_ADMIN = str(uuid.uuid4()) - - VALID_TOKEN = str(uuid.uuid4()) - USER_ID = str(uuid.uuid4()) - PROJECT_ID = str(uuid.uuid4()) - - VALID_TOKEN_2 = str(uuid.uuid4()) - USER_ID_2 = str(uuid.uuid4()) - PROJECT_ID_2 = str(uuid.uuid4()) - - INVALID_TOKEN = str(uuid.uuid4()) - - def __init__(self, *args, **kwargs): - self.auth_mode = kwargs.pop('auth_mode') - self.storage = kwargs.pop('storage') - self.indexer = kwargs.pop('indexer') - super(TestingApp, self).__init__(*args, **kwargs) - # Set up the Keystone auth_token fake cache - self.token = self.VALID_TOKEN - # Set up the default user for basic auth - self.user = self.USER_ID.encode('ascii') - - @contextlib.contextmanager - def use_admin_user(self): - if self.auth_mode == "keystone": - old_token = self.token - self.token = self.VALID_TOKEN_ADMIN - try: - yield - finally: - self.token = old_token - elif self.auth_mode == "basic": - old_user = self.user - self.user = b"admin" - try: - yield - finally: - self.user = old_user - elif self.auth_mode == "noauth": - raise testcase.TestSkipped("auth mode is noauth") - else: - raise RuntimeError("Unknown auth_mode") - - @contextlib.contextmanager - def use_another_user(self): - if self.auth_mode != "keystone": - raise testcase.TestSkipped("Auth mode is not Keystone") - old_token = self.token - self.token = self.VALID_TOKEN_2 - try: - yield - finally: - self.token = old_token - - @contextlib.contextmanager - def use_invalid_token(self): - if self.auth_mode != "keystone": - raise testcase.TestSkipped("Auth mode is not Keystone") - old_token = self.token - self.token = self.INVALID_TOKEN - try: - yield - finally: - self.token = old_token - - def do_request(self, req, *args, **kwargs): - if self.auth_mode == "keystone": - if self.token is not None: - req.headers['X-Auth-Token'] = self.token - elif self.auth_mode == "basic": - req.headers['Authorization'] = ( - b"basic " + base64.b64encode(self.user + b":") - ) - elif self.auth_mode == "noauth": - req.headers['X-User-Id'] = self.USER_ID - req.headers['X-Project-Id'] = self.PROJECT_ID - response = super(TestingApp, self).do_request(req, *args, **kwargs) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.storage.process_background_tasks(self.indexer, metrics, sync=True) - return response - - -class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios): - - scenarios = [ - ('basic', dict(auth_mode="basic")), - ('keystone', dict(auth_mode="keystone")), - ('noauth', dict(auth_mode="noauth")), - ] - - def setUp(self): - super(RestTest, self).setUp() - - if self.auth_mode == "keystone": - self.auth_token_fixture = self.useFixture( - ksm_fixture.AuthTokenFixture()) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_ADMIN, - user_id=TestingApp.USER_ID_ADMIN, -
user_name='adminusername', - project_id=TestingApp.PROJECT_ID_ADMIN, - role_list=['admin']) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN, - user_id=TestingApp.USER_ID, - user_name='myusername', - project_id=TestingApp.PROJECT_ID, - role_list=["member"]) - self.auth_token_fixture.add_token_data( - is_v2=True, - token_id=TestingApp.VALID_TOKEN_2, - user_id=TestingApp.USER_ID_2, - user_name='myusername2', - project_id=TestingApp.PROJECT_ID_2, - role_list=["member"]) - - self.conf.set_override("auth_mode", self.auth_mode, group="api") - - self.app = TestingApp(app.load_app(conf=self.conf, - indexer=self.index, - storage=self.storage, - not_implemented_middleware=False), - storage=self.storage, - indexer=self.index, - auth_mode=self.auth_mode) - - # NOTE(jd) Used at least by docs - @staticmethod - def runTest(): - pass - - -class RootTest(RestTest): - def test_deserialize_force_json(self): - with self.app.use_admin_user(): - self.app.post( - "/v1/archive_policy", - params="foo", - status=415) - - def test_capabilities(self): - custom_agg = extension.Extension('test_aggregation', None, None, None) - mgr = extension.ExtensionManager.make_test_instance( - [custom_agg], 'gnocchi.aggregates') - aggregation_methods = set( - archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS) - - with mock.patch.object(extension, 'ExtensionManager', - return_value=mgr): - result = self.app.get("/v1/capabilities").json - self.assertEqual( - sorted(aggregation_methods), - sorted(result['aggregation_methods'])) - self.assertEqual( - ['test_aggregation'], - result['dynamic_aggregation_methods']) - - def test_status(self): - with self.app.use_admin_user(): - r = self.app.get("/v1/status") - status = json.loads(r.text) - self.assertIsInstance(status['storage']['measures_to_process'], dict) - self.assertIsInstance(status['storage']['summary']['metrics'], int) - self.assertIsInstance(status['storage']['summary']['measures'], int) - - -class ArchivePolicyTest(RestTest): - """Test the ArchivePolicies REST API. - - See also gnocchi/tests/gabbi/gabbits/archive.yaml - """ - - # TODO(chdent): The tests left here involve inspecting the - # aggregation methods which gabbi can't currently handle because - # the ordering of the results is not predictable. 
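RestTest above multiplies every test across the three auth modes by combining testscenarios.TestWithScenarios with a module-level load_tests hook. A stripped-down, runnable sketch of the mechanism (AuthModeTest is a hypothetical class, assuming the testscenarios package is installed):

    import unittest

    import testscenarios

    load_tests = testscenarios.load_tests_apply_scenarios

    class AuthModeTest(testscenarios.TestWithScenarios, unittest.TestCase):
        scenarios = [
            ('basic', dict(auth_mode="basic")),
            ('keystone', dict(auth_mode="keystone")),
            ('noauth', dict(auth_mode="noauth")),
        ]

        def test_mode_is_bound(self):
            # runs three times, once per scenario, with self.auth_mode set
            self.assertIn(self.auth_mode, ("basic", "keystone", "noauth"))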
- - def test_post_archive_policy_with_agg_methods(self): - name = str(uuid.uuid4()) - with self.app.use_admin_user(): - result = self.app.post_json( - "/v1/archive_policy", - params={"name": name, - "aggregation_methods": ["mean"], - "definition": - [{ - "granularity": "1 minute", - "points": 20, - }]}, - status=201) - self.assertEqual("application/json", result.content_type) - ap = json.loads(result.text) - self.assertEqual(['mean'], ap['aggregation_methods']) - - def test_post_archive_policy_with_agg_methods_minus(self): - name = str(uuid.uuid4()) - with self.app.use_admin_user(): - result = self.app.post_json( - "/v1/archive_policy", - params={"name": name, - "aggregation_methods": ["-mean"], - "definition": - [{ - "granularity": "1 minute", - "points": 20, - }]}, - status=201) - self.assertEqual("application/json", result.content_type) - ap = json.loads(result.text) - self.assertEqual( - (set(self.conf.archive_policy.default_aggregation_methods) - - set(['mean'])), - set(ap['aggregation_methods'])) - - def test_get_archive_policy(self): - result = self.app.get("/v1/archive_policy/medium") - ap = json.loads(result.text) - ap_dict = self.archive_policies['medium'].jsonify() - ap_dict['definition'] = [ - archive_policy.ArchivePolicyItem(**d).jsonify() - for d in ap_dict['definition'] - ] - self.assertEqual(set(ap['aggregation_methods']), - ap_dict['aggregation_methods']) - del ap['aggregation_methods'] - del ap_dict['aggregation_methods'] - self.assertEqual(ap_dict, ap) - - def test_list_archive_policy(self): - result = self.app.get("/v1/archive_policy") - aps = json.loads(result.text) - # Transform list to set - for ap in aps: - ap['aggregation_methods'] = set(ap['aggregation_methods']) - for name, ap in six.iteritems(self.archive_policies): - apj = ap.jsonify() - apj['definition'] = [ - archive_policy.ArchivePolicyItem(**d).jsonify() - for d in ap.definition - ] - self.assertIn(apj, aps) - - -class MetricTest(RestTest): - - def test_get_metric_with_another_user_linked_resource(self): - result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 02:02:02", - "user_id": TestingApp.USER_ID_2, - "project_id": TestingApp.PROJECT_ID_2, - "metrics": {"foobar": {"archive_policy_name": "low"}}, - }) - resource = json.loads(result.text) - metric_id = resource["metrics"]["foobar"] - with self.app.use_another_user(): - self.app.get("/v1/metric/%s" % metric_id) - - def test_get_metric_with_another_user(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}, - status=201) - self.assertEqual("application/json", result.content_type) - - with self.app.use_another_user(): - self.app.get(result.headers['Location'], status=403) - - def test_post_archive_policy_no_mean(self): - """Test that we have a 404 if mean is not in AP.""" - ap = str(uuid.uuid4()) - with self.app.use_admin_user(): - self.app.post_json( - "/v1/archive_policy", - params={"name": ap, - "aggregation_methods": ["max"], - "definition": [{ - "granularity": "10s", - "points": 20, - }]}, - status=201) - result = self.app.post_json( - "/v1/metric", - params={"archive_policy_name": ap}, - status=201) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 8}, - {"timestamp": '2013-01-01 12:00:02', - "value": 16}]) - self.app.get("/v1/metric/%s/measures" % metric['id'], - status=404) - - def test_delete_metric_another_user(self): - result = 
self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - with self.app.use_another_user(): - self.app.delete("/v1/metric/" + metric['id'], status=403) - - def test_add_measure_with_another_user(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "high"}) - metric = json.loads(result.text) - with self.app.use_another_user(): - self.app.post_json( - "/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 23:23:23', - "value": 1234.2}], - status=403) - - def test_add_measures_back_window(self): - ap_name = str(uuid.uuid4()) - with self.app.use_admin_user(): - self.app.post_json( - "/v1/archive_policy", - params={"name": ap_name, - "back_window": 2, - "definition": - [{ - "granularity": "1 minute", - "points": 20, - }]}, - status=201) - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": ap_name}) - metric = json.loads(result.text) - self.app.post_json( - "/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 23:30:23', - "value": 1234.2}], - status=202) - self.app.post_json( - "/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 23:29:23', - "value": 1234.2}], - status=202) - self.app.post_json( - "/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 23:28:23', - "value": 1234.2}], - status=202) - # This one is too old and should not be taken into account - self.app.post_json( - "/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2012-01-01 23:27:23', - "value": 1234.2}], - status=202) - - ret = self.app.get("/v1/metric/%s/measures" % metric['id']) - result = json.loads(ret.text) - self.assertEqual( - [[u'2013-01-01T23:28:00+00:00', 60.0, 1234.2], - [u'2013-01-01T23:29:00+00:00', 60.0, 1234.2], - [u'2013-01-01T23:30:00+00:00', 60.0, 1234.2]], - result) - - def test_get_measure_with_another_user(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "low"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 23:23:23', - "value": 1234.2}]) - with self.app.use_another_user(): - self.app.get("/v1/metric/%s/measures" % metric['id'], - status=403) - - @mock.patch.object(utils, 'utcnow') - def test_get_measure_start_relative(self, utcnow): - """Make sure the timestamps can be relative to now.""" - utcnow.return_value = datetime.datetime(2014, 1, 1, 10, 23) - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "high"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": utils.utcnow().isoformat(), - "value": 1234.2}]) - ret = self.app.get( - "/v1/metric/%s/measures?start=-10 minutes" - % metric['id'], - status=200) - result = json.loads(ret.text) - now = utils.datetime_utc(2014, 1, 1, 10, 23) - self.assertEqual([ - ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2], - [(now - - datetime.timedelta( - seconds=now.second, - microseconds=now.microsecond)).isoformat(), - 60.0, 1234.2], - [(now - - datetime.timedelta( - microseconds=now.microsecond)).isoformat(), - 1.0, 1234.2]], result) - - def test_get_measure_stop(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "high"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 1234.2}, - {"timestamp": '2013-01-01 
12:00:02', - "value": 456}]) - ret = self.app.get("/v1/metric/%s/measures" - "?stop=2013-01-01 12:00:01" % metric['id'], - status=200) - result = json.loads(ret.text) - self.assertEqual( - [[u'2013-01-01T12:00:00+00:00', 3600.0, 845.1], - [u'2013-01-01T12:00:00+00:00', 60.0, 845.1], - [u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]], - result) - - def test_get_measure_aggregation(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 123.2}, - {"timestamp": '2013-01-01 12:00:03', - "value": 12345.2}, - {"timestamp": '2013-01-01 12:00:02', - "value": 1234.2}]) - ret = self.app.get( - "/v1/metric/%s/measures?aggregation=max" % metric['id'], - status=200) - result = json.loads(ret.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 12345.2], - [u'2013-01-01T12:00:00+00:00', 3600.0, 12345.2], - [u'2013-01-01T12:00:00+00:00', 60.0, 12345.2]], - result) - - def test_get_moving_average(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 69}, - {"timestamp": '2013-01-01 12:00:20', - "value": 42}, - {"timestamp": '2013-01-01 12:00:40', - "value": 6}, - {"timestamp": '2013-01-01 12:01:00', - "value": 44}, - {"timestamp": '2013-01-01 12:01:20', - "value": 7}]) - - path = "/v1/metric/%s/measures?aggregation=%s&window=%ds" - ret = self.app.get(path % (metric['id'], 'moving-average', 120), - status=200) - result = json.loads(ret.text) - expected = [[u'2013-01-01T12:00:00+00:00', 120.0, 32.25]] - self.assertEqual(expected, result) - ret = self.app.get(path % (metric['id'], 'moving-average', 90), - status=400) - self.assertIn('No data available that is either full-res', - ret.text) - path = "/v1/metric/%s/measures?aggregation=%s" - ret = self.app.get(path % (metric['id'], 'moving-average'), - status=400) - self.assertIn('Moving aggregate must have window specified', - ret.text) - - def test_get_moving_average_invalid_window(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 69}, - {"timestamp": '2013-01-01 12:00:20', - "value": 42}, - {"timestamp": '2013-01-01 12:00:40', - "value": 6}, - {"timestamp": '2013-01-01 12:01:00', - "value": 44}, - {"timestamp": '2013-01-01 12:01:20', - "value": 7}]) - - path = "/v1/metric/%s/measures?aggregation=%s&window=foobar" - ret = self.app.get(path % (metric['id'], 'moving-average'), - status=400) - self.assertIn('Invalid value for window', ret.text) - - def test_get_resource_missing_named_metric_measure_aggregation(self): - mgr = self.index.get_resource_type_schema() - resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(resource_type, { - "server_group": {"type": "string", - "min_length": 1, - "max_length": 40, - "required": True} - }, 'creating')) - - attributes = { - "server_group": str(uuid.uuid4()), - } - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric1 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric1['id'], - params=[{"timestamp": '2013-01-01 
12:00:01', - "value": 8}, - {"timestamp": '2013-01-01 12:00:02', - "value": 16}]) - - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric2 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric2['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 0}, - {"timestamp": '2013-01-01 12:00:02', - "value": 4}]) - - attributes['id'] = str(uuid.uuid4()) - attributes['metrics'] = {'foo': metric1['id']} - self.app.post_json("/v1/resource/" + resource_type, - params=attributes) - - attributes['id'] = str(uuid.uuid4()) - attributes['metrics'] = {'bar': metric2['id']} - self.app.post_json("/v1/resource/" + resource_type, - params=attributes) - - result = self.app.post_json( - "/v1/aggregation/resource/%s/metric/foo?aggregation=max" - % resource_type, - params={"=": {"server_group": attributes['server_group']}}) - - measures = json.loads(result.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]], - measures) - - def test_search_value(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "high"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:00:00', - "value": 1234.2}, - {"timestamp": '2013-01-01 12:00:02', - "value": 456}]) - metric1 = metric['id'] - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "high"}) - metric = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric['id'], - params=[{"timestamp": '2013-01-01 12:30:00', - "value": 1234.2}, - {"timestamp": '2013-01-01 12:00:02', - "value": 456}]) - metric2 = metric['id'] - - ret = self.app.post_json( - "/v1/search/metric?metric_id=%s&metric_id=%s" - "&stop=2013-01-01 12:10:00" % (metric1, metric2), - params={u"∧": [{u"≥": 1000}]}, - status=200) - result = json.loads(ret.text) - self.assertEqual( - {metric1: [[u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]], - metric2: []}, - result) - - -class ResourceTest(RestTest): - def setUp(self): - super(ResourceTest, self).setUp() - self.attributes = { - "id": str(uuid.uuid4()), - "started_at": "2014-01-03T02:02:02+00:00", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - "name": "my-name", - } - self.patchable_attributes = { - "ended_at": "2014-01-03T02:02:02+00:00", - "name": "new-name", - } - self.resource = self.attributes.copy() - # Set original_resource_id - self.resource['original_resource_id'] = self.resource['id'] - self.resource['created_by_user_id'] = TestingApp.USER_ID - if self.auth_mode in ("keystone", "noauth"): - self.resource['created_by_project_id'] = TestingApp.PROJECT_ID - self.resource['creator'] = ( - TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID - ) - elif self.auth_mode == "basic": - self.resource['created_by_project_id'] = "" - self.resource['creator'] = TestingApp.USER_ID - self.resource['ended_at'] = None - self.resource['metrics'] = {} - if 'user_id' not in self.resource: - self.resource['user_id'] = None - if 'project_id' not in self.resource: - self.resource['project_id'] = None - - mgr = self.index.get_resource_type_schema() - self.resource_type = str(uuid.uuid4()) - self.index.create_resource_type( - mgr.resource_type_from_dict(self.resource_type, { - "name": {"type": "string", - "min_length": 1, - "max_length": 40, - "required": True} - }, "creating")) - self.resource['type'] = self.resource_type - - 
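Several of the tests below decorate themselves with mock.patch.object(utils, 'utcnow') so that revision_start and Last-Modified are deterministic. The same freeze written as a context manager, a minimal sketch assuming the gnocchi test environment from this file:

    import mock

    from gnocchi import utils

    with mock.patch.object(utils, 'utcnow') as utcnow:
        utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
        # anything under test that calls utils.utcnow() now sees the
        # frozen instant, so timestamps compare exactly against
        # "2014-01-01T10:23:00+00:00"
        assert utils.utcnow().isoformat() == "2014-01-01T10:23:00+00:00"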
@mock.patch.object(utils, 'utcnow') - def test_post_resource(self, utcnow): - utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - resource = json.loads(result.text) - self.assertEqual("http://localhost/v1/resource/" - + self.resource_type + "/" + self.attributes['id'], - result.headers['Location']) - self.assertIsNone(resource['revision_end']) - self.assertEqual(resource['revision_start'], - "2014-01-01T10:23:00+00:00") - self._check_etag(result, resource) - del resource['revision_start'] - del resource['revision_end'] - self.assertEqual(self.resource, resource) - - def test_post_resource_with_invalid_metric(self): - metric_id = str(uuid.uuid4()) - self.attributes['metrics'] = {"foo": metric_id} - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=400) - self.assertIn("Metric %s does not exist" % metric_id, - result.text) - - def test_post_resource_with_metric_from_other_user(self): - with self.app.use_another_user(): - metric = self.app.post_json( - "/v1/metric", - params={'archive_policy_name': "high"}) - metric_id = json.loads(metric.text)['id'] - self.attributes['metrics'] = {"foo": metric_id} - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=400) - self.assertIn("Metric %s does not exist" % metric_id, - result.text) - - def test_post_resource_already_exist(self): - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=409) - self.assertIn("Resource %s already exists" % self.attributes['id'], - result.text) - - def test_post_invalid_timestamp(self): - self.attributes['started_at'] = "2014-01-01 02:02:02" - self.attributes['ended_at'] = "2013-01-01 02:02:02" - self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes, - status=400) - - @staticmethod - def _strtime_to_httpdate(dt): - return email_utils.formatdate(calendar.timegm( - iso8601.parse_date(dt).timetuple()), usegmt=True) - - def _check_etag(self, response, resource): - lastmodified = self._strtime_to_httpdate(resource['revision_start']) - etag = hashlib.sha1() - etag.update(resource['id'].encode('utf-8')) - etag.update(resource['revision_start'].encode('utf8')) - self.assertEqual(response.headers['Last-Modified'], lastmodified) - self.assertEqual(response.headers['ETag'], '"%s"' % etag.hexdigest()) - - @mock.patch.object(utils, 'utcnow') - def test_get_resource(self, utcnow): - utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) - result = self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - result = self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id']) - resource = json.loads(result.text) - self.assertIsNone(resource['revision_end']) - self.assertEqual(resource['revision_start'], - "2014-01-01T10:23:00+00:00") - self._check_etag(result, resource) - del resource['revision_start'] - del resource['revision_end'] - self.assertEqual(self.resource, resource) - - def test_get_resource_etag(self): - result = self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - result = self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id']) - resource = json.loads(result.text) - 
etag = hashlib.sha1() - etag.update(resource['id'].encode('utf-8')) - etag.update(resource['revision_start'].encode('utf-8')) - etag = etag.hexdigest() - lastmodified = self._strtime_to_httpdate(resource['revision_start']) - oldlastmodified = self._strtime_to_httpdate("2000-01-01 00:00:00") - - # if-match and if-unmodified-since - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': 'fake'}, - status=412) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': etag}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-unmodified-since': lastmodified}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-unmodified-since': oldlastmodified}, - status=412) - # Some case with '*' - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': '*'}, - status=304) - self.app.get("/v1/resource/" + self.resource_type - + "/wrongid", - headers={'if-none-match': '*'}, - status=404) - # always prefers if-match if both provided - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': etag, - 'if-unmodified-since': lastmodified}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': etag, - 'if-unmodified-since': oldlastmodified}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': '*', - 'if-unmodified-since': oldlastmodified}, - status=200) - - # if-none-match and if-modified-since - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': etag}, - status=304) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': 'fake'}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': lastmodified}, - status=304) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': oldlastmodified}, - status=200) - # always prefers if-none-match if both provided - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': oldlastmodified, - 'if-none-match': etag}, - status=304) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': oldlastmodified, - 'if-none-match': '*'}, - status=304) - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': lastmodified, - 'if-none-match': '*'}, - status=304) - # Some case with '*' - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-match': '*'}, - status=200) - self.app.get("/v1/resource/" + self.resource_type - + "/wrongid", - headers={'if-match': '*'}, - status=404) - - # if-none-match and if-match - self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': etag, - 'if-match': etag}, - status=304) - - # if-none-match returns 412 instead 304 for PUT/PATCH/DELETE - self.app.patch_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': '*'}, - status=412) - 
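The conditional-request checks above all derive their validators the same way _check_etag does: the ETag is the hex SHA-1 of the resource id concatenated with its revision_start, and Last-Modified is revision_start rendered as an HTTP date. Extracted into a standalone helper for clarity (expected_validators is a hypothetical name):

    import calendar
    from email import utils as email_utils
    import hashlib

    import iso8601

    def expected_validators(resource):
        etag = hashlib.sha1()
        etag.update(resource['id'].encode('utf-8'))
        etag.update(resource['revision_start'].encode('utf-8'))
        lastmodified = email_utils.formatdate(calendar.timegm(
            iso8601.parse_date(resource['revision_start']).timetuple()),
            usegmt=True)
        # ETag header values are quoted, hence the surrounding '"'
        return '"%s"' % etag.hexdigest(), lastmodified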
self.app.delete("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-none-match': '*'}, - status=412) - - # if-modified-since is ignored with PATCH/PUT/DELETE - self.app.patch_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - params=self.patchable_attributes, - headers={'if-modified-since': lastmodified}, - status=200) - self.app.delete("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - headers={'if-modified-since': lastmodified}, - status=204) - - def test_get_resource_non_admin(self): - with self.app.use_another_user(): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'], - status=200) - - def test_get_resource_unauthorized(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - with self.app.use_another_user(): - self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'], - status=403) - - def test_get_resource_named_metric(self): - self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'] - + "/metric/foo/measures", - status=200) - - def test_list_resource_metrics_unauthorized(self): - self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - with self.app.use_another_user(): - self.app.get( - "/v1/resource/" + self.resource_type - + "/" + self.attributes['id'] + "/metric", - status=403) - - def test_delete_resource_named_metric(self): - self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - self.app.delete("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'] - + "/metric/foo", - status=204) - self.app.delete("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'] - + "/metric/foo/measures", - status=404) - - def test_get_resource_unknown_named_metric(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'] - + "/metric/foo", - status=404) - - def test_post_append_metrics_already_exists(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - metrics = {'foo': {'archive_policy_name': "high"}} - self.app.post_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'] + "/metric", - params=metrics, status=204) - metrics = {'foo': {'archive_policy_name': "low"}} - self.app.post_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'] - + "/metric", - params=metrics, - status=409) - - result = self.app.get("/v1/resource/" - + self.resource_type + "/" - + self.attributes['id']) - result = json.loads(result.text) - self.assertTrue(uuid.UUID(result['metrics']['foo'])) - - def test_post_append_metrics(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - metrics = {'foo': {'archive_policy_name': "high"}} - self.app.post_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'] + "/metric", - params=metrics, status=204) - result = 
self.app.get("/v1/resource/" - + self.resource_type + "/" - + self.attributes['id']) - result = json.loads(result.text) - self.assertTrue(uuid.UUID(result['metrics']['foo'])) - - def test_post_append_metrics_created_by_different_user(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - with self.app.use_another_user(): - metric = self.app.post_json( - "/v1/metric", - params={'archive_policy_name': "high"}) - metric_id = json.loads(metric.text)['id'] - result = self.app.post_json("/v1/resource/" + self.resource_type - + "/" + self.attributes['id'] + "/metric", - params={str(uuid.uuid4()): metric_id}, - status=400) - self.assertIn("Metric %s does not exist" % metric_id, result.text) - - @mock.patch.object(utils, 'utcnow') - def test_patch_resource_metrics(self, utcnow): - utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) - result = self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - r = json.loads(result.text) - - utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 49) - new_metrics = {'foo': {'archive_policy_name': "medium"}} - self.app.patch_json( - "/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - params={'metrics': new_metrics}, - status=200) - result = self.app.get("/v1/resource/" - + self.resource_type + "/" - + self.attributes['id']) - result = json.loads(result.text) - self.assertTrue(uuid.UUID(result['metrics']['foo'])) - self.assertIsNone(result['revision_end']) - self.assertIsNone(r['revision_end']) - self.assertEqual(result['revision_start'], "2014-01-01T10:23:00+00:00") - self.assertEqual(r['revision_start'], "2014-01-01T10:23:00+00:00") - - del result['metrics'] - del result['revision_start'] - del result['revision_end'] - del r['metrics'] - del r['revision_start'] - del r['revision_end'] - self.assertEqual(r, result) - - def test_patch_resource_existent_metrics_from_another_user(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - with self.app.use_another_user(): - result = self.app.post_json( - "/v1/metric", - params={'archive_policy_name': "medium"}) - metric_id = json.loads(result.text)['id'] - result = self.app.patch_json( - "/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'], - params={'metrics': {'foo': metric_id}}, - status=400) - self.assertIn("Metric %s does not exist" % metric_id, result.text) - result = self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id']) - result = json.loads(result.text) - self.assertEqual({}, result['metrics']) - - def test_patch_resource_non_existent_metrics(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - e1 = str(uuid.uuid4()) - result = self.app.patch_json( - "/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'], - params={'metrics': {'foo': e1}}, - status=400) - self.assertIn("Metric %s does not exist" % e1, result.text) - result = self.app.get("/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id']) - result = json.loads(result.text) - self.assertEqual({}, result['metrics']) - - @mock.patch.object(utils, 'utcnow') - def test_patch_resource_attributes(self, utcnow): - utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23) - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 48) - presponse = self.app.patch_json( - 
"/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - params=self.patchable_attributes, - status=200) - response = self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id']) - result = json.loads(response.text) - presult = json.loads(presponse.text) - self.assertEqual(result, presult) - for k, v in six.iteritems(self.patchable_attributes): - self.assertEqual(v, result[k]) - self.assertIsNone(result['revision_end']) - self.assertEqual(result['revision_start'], - "2014-01-02T06:48:00+00:00") - self._check_etag(response, result) - - # Check the history - history = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - headers={"Accept": "application/json; history=true"}, - params={"=": {"id": result['id']}}, - status=200) - history = json.loads(history.text) - self.assertGreaterEqual(len(history), 2) - self.assertEqual(result, history[1]) - - h = history[0] - for k, v in six.iteritems(self.attributes): - self.assertEqual(v, h[k]) - self.assertEqual(h['revision_end'], - "2014-01-02T06:48:00+00:00") - self.assertEqual(h['revision_start'], - "2014-01-01T10:23:00+00:00") - - def test_patch_resource_attributes_unauthorized(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - with self.app.use_another_user(): - self.app.patch_json( - "/v1/resource/" + self.resource_type - + "/" + self.attributes['id'], - params=self.patchable_attributes, - status=403) - - def test_patch_resource_ended_at_before_started_at(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - self.app.patch_json( - "/v1/resource/" - + self.resource_type - + "/" - + self.attributes['id'], - params={'ended_at': "2000-05-05 23:23:23"}, - status=400) - - def test_patch_resource_no_partial_update(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - e1 = str(uuid.uuid4()) - result = self.app.patch_json( - "/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - params={'ended_at': "2044-05-05 23:23:23", - 'metrics': {"foo": e1}}, - status=400) - self.assertIn("Metric %s does not exist" % e1, result.text) - result = self.app.get("/v1/resource/" - + self.resource_type + "/" - + self.attributes['id']) - result = json.loads(result.text) - del result['revision_start'] - del result['revision_end'] - self.assertEqual(self.resource, result) - - def test_patch_resource_non_existent(self): - self.app.patch_json( - "/v1/resource/" + self.resource_type - + "/" + str(uuid.uuid4()), - params={}, - status=404) - - def test_patch_resource_non_existent_with_body(self): - self.app.patch_json( - "/v1/resource/" + self.resource_type - + "/" + str(uuid.uuid4()), - params=self.patchable_attributes, - status=404) - - def test_patch_resource_unknown_field(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - result = self.app.patch_json( - "/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - params={'foobar': 123}, - status=400) - self.assertIn(b'Invalid input: extra keys not allowed @ data[' - + repr(u'foobar').encode('ascii') + b"]", - result.body) - - def test_delete_resource(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - self.app.get("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=200) - self.app.delete("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=204) - 
self.app.get("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=404) - - def test_delete_resource_with_metrics(self): - metric = self.app.post_json( - "/v1/metric", - params={'archive_policy_name': "high"}) - metric_id = json.loads(metric.text)['id'] - metric_name = six.text_type(uuid.uuid4()) - self.attributes['metrics'] = {metric_name: metric_id} - self.app.get("/v1/metric/" + metric_id, - status=200) - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - self.app.get("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=200) - self.app.delete("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=204) - self.app.get("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=404) - self.app.get("/v1/metric/" + metric_id, - status=404) - - def test_delete_resource_unauthorized(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - with self.app.use_another_user(): - self.app.delete("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=403) - - def test_delete_resource_non_existent(self): - result = self.app.delete("/v1/resource/" + self.resource_type + "/" - + self.attributes['id'], - status=404) - self.assertIn( - "Resource %s does not exist" % self.attributes['id'], - result.text) - - def test_post_resource_with_metrics(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric = json.loads(result.text) - self.attributes['metrics'] = {"foo": metric['id']} - result = self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - resource = json.loads(result.text) - self.assertEqual("http://localhost/v1/resource/" - + self.resource_type + "/" - + self.attributes['id'], - result.headers['Location']) - self.resource['metrics'] = self.attributes['metrics'] - del resource['revision_start'] - del resource['revision_end'] - self.assertEqual(self.resource, resource) - - def test_post_resource_with_null_metrics(self): - self.attributes['metrics'] = {"foo": {"archive_policy_name": "low"}} - result = self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - resource = json.loads(result.text) - self.assertEqual("http://localhost/v1/resource/" - + self.resource_type + "/" - + self.attributes['id'], - result.headers['Location']) - self.assertEqual(self.attributes['id'], resource["id"]) - metric_id = uuid.UUID(resource['metrics']['foo']) - result = self.app.get("/v1/metric/" + str(metric_id) + "/measures", - status=200) - - def test_search_datetime(self): - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes, - status=201) - result = self.app.get("/v1/resource/" + self.resource_type - + "/" + self.attributes['id']) - result = json.loads(result.text) - - resources = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"and": [{"=": {"id": result['id']}}, - {"=": {"ended_at": None}}]}, - status=200) - resources = json.loads(resources.text) - self.assertGreaterEqual(len(resources), 1) - self.assertEqual(result, resources[0]) - - resources = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - headers={"Accept": "application/json; history=true"}, - params={"and": [ - {"=": {"id": result['id']}}, - {"or": [{">=": {"revision_end": '2014-01-03T02:02:02'}}, - {"=": {"revision_end": None}}]} - ]}, - status=200) - resources = 
json.loads(resources.text) - self.assertGreaterEqual(len(resources), 1) - self.assertEqual(result, resources[0]) - - def test_search_resource_by_original_resource_id(self): - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes) - created_resource = json.loads(result.text) - original_id = created_resource['original_resource_id'] - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"eq": {"original_resource_id": original_id}}, - status=200) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 1) - self.assertEqual(created_resource, resources[0]) - - def test_search_resources_by_user(self): - u1 = str(uuid.uuid4()) - self.attributes['user_id'] = u1 - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes) - created_resource = json.loads(result.text) - result = self.app.post_json("/v1/search/resource/generic", - params={"eq": {"user_id": u1}}, - status=200) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 1) - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"=": {"user_id": u1}}, - status=200) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 1) - self.assertEqual(created_resource, resources[0]) - - def test_search_resources_with_another_project_id(self): - u1 = str(uuid.uuid4()) - result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 02:02:02", - "user_id": u1, - "project_id": TestingApp.PROJECT_ID_2, - }) - g = json.loads(result.text) - - with self.app.use_another_user(): - result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 03:03:03", - "user_id": u1, - "project_id": str(uuid.uuid4()), - }) - j = json.loads(result.text) - g_found = False - j_found = False - - result = self.app.post_json( - "/v1/search/resource/generic", - params={"=": {"user_id": u1}}, - status=200) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 2) - for r in resources: - if r['id'] == str(g['id']): - self.assertEqual(g, r) - g_found = True - elif r['id'] == str(j['id']): - self.assertEqual(j, r) - j_found = True - if g_found and j_found: - break - else: - self.fail("Some resources were not found") - - def test_search_resources_by_unknown_field(self): - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"=": {"foobar": "baz"}}, - status=400) - self.assertIn("Resource type " + self.resource_type - + " has no foobar attribute", - result.text) - - def test_search_resources_started_after(self): - # NOTE(jd) So this test is a bit fuzzy right now as we uses the same - # database for all tests and the tests are running concurrently, but - # for now it'll be better than nothing. 
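The search tests in this area exercise both spellings of the query operators: the started-after test below issues its filter as {"≥": ...} where other tests write {">=": ...}, and the metric value search earlier used "∧" for "and". A sketch of the equivalence, assuming the same self.app fixture:

    # ASCII and unicode spellings of the same resource search
    self.app.post_json("/v1/search/resource/generic",
                       params={">=": {"started_at": "2014-01-01"}},
                       status=200)
    self.app.post_json("/v1/search/resource/generic",
                       params={u"≥": {"started_at": "2014-01-01"}},
                       status=200)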
- result = self.app.post_json( - "/v1/resource/generic/", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 02:02:02", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }) - g = json.loads(result.text) - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes) - i = json.loads(result.text) - result = self.app.post_json( - "/v1/search/resource/generic", - params={"≥": {"started_at": "2014-01-01"}}, - status=200) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 2) - - i_found = False - g_found = False - for r in resources: - if r['id'] == str(g['id']): - self.assertEqual(g, r) - g_found = True - elif r['id'] == str(i['id']): - i_found = True - if i_found and g_found: - break - else: - self.fail("Some resources were not found") - - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={">=": {"started_at": "2014-01-03"}}) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 1) - for r in resources: - if r['id'] == str(i['id']): - self.assertEqual(i, r) - break - else: - self.fail("Some resources were not found") - - def test_list_resources_with_bad_details(self): - result = self.app.get("/v1/resource/generic?details=awesome", - status=400) - self.assertIn( - b"Unable to parse `details': invalid truth value", - result.body) - - def test_list_resources_with_bad_details_in_accept(self): - result = self.app.get("/v1/resource/generic", - headers={ - "Accept": "application/json; details=foo", - }, - status=400) - self.assertIn( - b"Unable to parse `Accept header': invalid truth value", - result.body) - - def _do_test_list_resources_with_detail(self, request): - # NOTE(jd) So this test is a bit fuzzy right now as we uses the same - # database for all tests and the tests are running concurrently, but - # for now it'll be better than nothing. 
- result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 02:02:02", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }) - g = json.loads(result.text) - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes) - i = json.loads(result.text) - result = request() - self.assertEqual(200, result.status_code) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 2) - - i_found = False - g_found = False - for r in resources: - if r['id'] == str(g['id']): - self.assertEqual(g, r) - g_found = True - elif r['id'] == str(i['id']): - i_found = True - # Check we got all the details - self.assertEqual(i, r) - if i_found and g_found: - break - else: - self.fail("Some resources were not found") - - result = self.app.get("/v1/resource/" + self.resource_type) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 1) - for r in resources: - if r['id'] == str(i['id']): - self.assertEqual(i, r) - break - else: - self.fail("Some resources were not found") - - def test_list_resources_with_another_project_id(self): - result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 02:02:02", - "user_id": TestingApp.USER_ID_2, - "project_id": TestingApp.PROJECT_ID_2, - }) - g = json.loads(result.text) - - with self.app.use_another_user(): - result = self.app.post_json( - "/v1/resource/generic", - params={ - "id": str(uuid.uuid4()), - "started_at": "2014-01-01 03:03:03", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }) - j = json.loads(result.text) - - g_found = False - j_found = False - - result = self.app.get("/v1/resource/generic") - self.assertEqual(200, result.status_code) - resources = json.loads(result.text) - self.assertGreaterEqual(len(resources), 2) - for r in resources: - if r['id'] == str(g['id']): - self.assertEqual(g, r) - g_found = True - elif r['id'] == str(j['id']): - self.assertEqual(j, r) - j_found = True - if g_found and j_found: - break - else: - self.fail("Some resources were not found") - - def test_list_resources_with_details(self): - self._do_test_list_resources_with_detail( - lambda: self.app.get("/v1/resource/generic?details=true")) - - def test_list_resources_with_details_via_accept(self): - self._do_test_list_resources_with_detail( - lambda: self.app.get( - "/v1/resource/generic", - headers={"Accept": "application/json; details=true"})) - - def test_search_resources_with_details(self): - self._do_test_list_resources_with_detail( - lambda: self.app.post("/v1/search/resource/generic?details=true")) - - def test_search_resources_with_details_via_accept(self): - self._do_test_list_resources_with_detail( - lambda: self.app.post( - "/v1/search/resource/generic", - headers={"Accept": "application/json; details=true"})) - - def test_get_res_named_metric_measure_aggregated_policies_invalid(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "low"}) - metric1 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric1['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 16}]) - - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": - "no_granularity_match"}) - metric2 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric2['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 4}]) - - # NOTE(sileht): because the database is 
never cleaned between each test - # we must ensure that the query will not match resources from another - # test; to achieve this we set a different name on each test. - name = str(uuid.uuid4()) - self.attributes['name'] = name - - self.attributes['metrics'] = {'foo': metric1['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - self.attributes['id'] = str(uuid.uuid4()) - self.attributes['metrics'] = {'foo': metric2['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - result = self.app.post_json( - "/v1/aggregation/resource/" - + self.resource_type + "/metric/foo?aggregation=max", - params={"=": {"name": name}}, - status=400) - self.assertIn(b"One of the metrics being aggregated doesn't have " - b"matching granularity", - result.body) - - def test_get_res_named_metric_measure_aggregation_nooverlap(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric1 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric1['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 8}, - {"timestamp": '2013-01-01 12:00:02', - "value": 16}]) - - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric2 = json.loads(result.text) - - # NOTE(sileht): because the database is never cleaned between each test - # we must ensure that the query will not match resources from another - # test; to achieve this we set a different name on each test. - name = str(uuid.uuid4()) - self.attributes['name'] = name - - self.attributes['metrics'] = {'foo': metric1['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - self.attributes['id'] = str(uuid.uuid4()) - self.attributes['metrics'] = {'foo': metric2['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - result = self.app.post_json( - "/v1/aggregation/resource/" + self.resource_type - + "/metric/foo?aggregation=max", - params={"=": {"name": name}}, - expect_errors=True) - - self.assertEqual(400, result.status_code, result.text) - self.assertIn("No overlap", result.text) - - result = self.app.post_json( - "/v1/aggregation/resource/" - + self.resource_type + "/metric/foo?aggregation=min" - + "&needed_overlap=0", - params={"=": {"name": name}}, - expect_errors=True) - - self.assertEqual(200, result.status_code, result.text) - measures = json.loads(result.text) - self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0], - ['2013-01-01T12:00:00+00:00', 3600.0, 8.0], - ['2013-01-01T12:00:00+00:00', 60.0, 8.0]], - measures) - - def test_get_res_named_metric_measure_aggregation_nominal(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric1 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric1['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 8}, - {"timestamp": '2013-01-01 12:00:02', - "value": 16}]) - - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric2 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric2['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 0}, - {"timestamp": '2013-01-01 12:00:02', - "value": 4}]) - - # NOTE(sileht): because the database is never cleaned between each test - # we must ensure that the query will not match resources from another - # test; to achieve this we set a different name on each
test. - name = str(uuid.uuid4()) - self.attributes['name'] = name - - self.attributes['metrics'] = {'foo': metric1['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - self.attributes['id'] = str(uuid.uuid4()) - self.attributes['metrics'] = {'foo': metric2['id']} - self.app.post_json("/v1/resource/" + self.resource_type, - params=self.attributes) - - result = self.app.post_json( - "/v1/aggregation/resource/" + self.resource_type - + "/metric/foo?aggregation=max", - params={"=": {"name": name}}, - expect_errors=True) - - self.assertEqual(200, result.status_code, result.text) - measures = json.loads(result.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0], - [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]], - measures) - - result = self.app.post_json( - "/v1/aggregation/resource/" - + self.resource_type + "/metric/foo?aggregation=min", - params={"=": {"name": name}}, - expect_errors=True) - - self.assertEqual(200, result.status_code) - measures = json.loads(result.text) - self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0], - ['2013-01-01T12:00:00+00:00', 3600.0, 0], - ['2013-01-01T12:00:00+00:00', 60.0, 0]], - measures) - - def test_get_aggregated_measures_across_entities_no_match(self): - result = self.app.post_json( - "/v1/aggregation/resource/" - + self.resource_type + "/metric/foo?aggregation=min", - params={"=": {"name": "none!"}}, - expect_errors=True) - - self.assertEqual(200, result.status_code) - measures = json.loads(result.text) - self.assertEqual([], measures) - - def test_get_aggregated_measures_across_entities(self): - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric1 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric1['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 8}, - {"timestamp": '2013-01-01 12:00:02', - "value": 16}]) - - result = self.app.post_json("/v1/metric", - params={"archive_policy_name": "medium"}) - metric2 = json.loads(result.text) - self.app.post_json("/v1/metric/%s/measures" % metric2['id'], - params=[{"timestamp": '2013-01-01 12:00:01', - "value": 0}, - {"timestamp": '2013-01-01 12:00:02', - "value": 4}]) - # Check with one metric - result = self.app.get("/v1/aggregation/metric" - "?aggregation=mean&metric=%s" % (metric2['id'])) - measures = json.loads(result.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 2.0], - [u'2013-01-01T12:00:00+00:00', 3600.0, 2.0], - [u'2013-01-01T12:00:00+00:00', 60.0, 2.0]], - measures) - - # Check with two metrics - result = self.app.get("/v1/aggregation/metric" - "?aggregation=mean&metric=%s&metric=%s" % - (metric1['id'], metric2['id'])) - measures = json.loads(result.text) - self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 7.0], - [u'2013-01-01T12:00:00+00:00', 3600.0, 7.0], - [u'2013-01-01T12:00:00+00:00', 60.0, 7.0]], - measures) - - def test_search_resources_with_like(self): - result = self.app.post_json( - "/v1/resource/" + self.resource_type, - params=self.attributes) - created_resource = json.loads(result.text) - - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"like": {"name": "my%"}}, - status=200) - - resources = json.loads(result.text) - self.assertIn(created_resource, resources) - - result = self.app.post_json( - "/v1/search/resource/" + self.resource_type, - params={"like": {"name": str(uuid.uuid4())}}, - status=200) - resources = 
json.loads(result.text) - self.assertEqual([], resources) - - -class GenericResourceTest(RestTest): - def test_list_resources_tied_to_user(self): - resource_id = str(uuid.uuid4()) - self.app.post_json( - "/v1/resource/generic", - params={ - "id": resource_id, - "started_at": "2014-01-01 02:02:02", - "user_id": str(uuid.uuid4()), - "project_id": str(uuid.uuid4()), - }) - - with self.app.use_another_user(): - result = self.app.get("/v1/resource/generic") - resources = json.loads(result.text) - for resource in resources: - if resource['id'] == resource_id: - self.fail("Resource found") - - def test_get_resources_metric_tied_to_user(self): - resource_id = str(uuid.uuid4()) - self.app.post_json( - "/v1/resource/generic", - params={ - "id": resource_id, - "started_at": "2014-01-01 02:02:02", - "user_id": TestingApp.USER_ID_2, - "project_id": TestingApp.PROJECT_ID_2, - "metrics": {"foobar": {"archive_policy_name": "low"}}, - }) - - # This user created it, she can access it - self.app.get( - "/v1/resource/generic/%s/metric/foobar" % resource_id) - - with self.app.use_another_user(): - # This user "owns it", it should be able to access it - self.app.get( - "/v1/resource/generic/%s/metric/foobar" % resource_id) - - def test_search_resources_invalid_query(self): - result = self.app.post_json( - "/v1/search/resource/generic", - params={"wrongoperator": {"user_id": "bar"}}, - status=400) - self.assertIn( - "Invalid input: extra keys not allowed @ data[" - + repr(u'wrongoperator') + "]", - result.text) - - -class QueryStringSearchAttrFilterTest(tests_base.TestCase): - def _do_test(self, expr, expected): - req = rest.QueryStringSearchAttrFilter.parse(expr) - self.assertEqual(expected, req) - - def test_search_query_builder(self): - self._do_test('foo=7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C', - {"=": {"foo": "7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C"}}) - self._do_test('foo=7EED6CC3EDC848C98EF68A36B9ACC91C', - {"=": {"foo": "7EED6CC3EDC848C98EF68A36B9ACC91C"}}) - self._do_test('foo=bar', {"=": {"foo": "bar"}}) - self._do_test('foo!=1', {"!=": {"foo": 1.0}}) - self._do_test('foo=True', {"=": {"foo": True}}) - self._do_test('foo=null', {"=": {"foo": None}}) - self._do_test('foo="null"', {"=": {"foo": "null"}}) - self._do_test('foo in ["null", "foo"]', - {"in": {"foo": ["null", "foo"]}}) - self._do_test(u'foo="quote" and bar≠1', - {"and": [{u"≠": {"bar": 1}}, - {"=": {"foo": "quote"}}]}) - self._do_test('foo="quote" or bar like "%%foo"', - {"or": [{"like": {"bar": "%%foo"}}, - {"=": {"foo": "quote"}}]}) - - self._do_test('not (foo="quote" or bar like "%%foo" or foo="what!" ' - 'or bar="who?")', - {"not": {"or": [ - {"=": {"bar": "who?"}}, - {"=": {"foo": "what!"}}, - {"like": {"bar": "%%foo"}}, - {"=": {"foo": "quote"}}, - ]}}) - - self._do_test('(foo="quote" or bar like "%%foo" or not foo="what!" ' - 'or bar="who?") and cat="meme"', - {"and": [ - {"=": {"cat": "meme"}}, - {"or": [ - {"=": {"bar": "who?"}}, - {"not": {"=": {"foo": "what!"}}}, - {"like": {"bar": "%%foo"}}, - {"=": {"foo": "quote"}}, - ]} - ]}) - - self._do_test('foo="quote" or bar like "%%foo" or foo="what!" ' - 'or bar="who?" and cat="meme"', - {"or": [ - {"and": [ - {"=": {"cat": "meme"}}, - {"=": {"bar": "who?"}}, - ]}, - {"=": {"foo": "what!"}}, - {"like": {"bar": "%%foo"}}, - {"=": {"foo": "quote"}}, - ]}) - - self._do_test('foo="quote" or bar like "%%foo" and foo="what!" ' - 'or bar="who?" 
or cat="meme"', - {"or": [ - {"=": {"cat": "meme"}}, - {"=": {"bar": "who?"}}, - {"and": [ - {"=": {"foo": "what!"}}, - {"like": {"bar": "%%foo"}}, - ]}, - {"=": {"foo": "quote"}}, - ]}) diff --git a/gnocchi/tests/test_statsd.py b/gnocchi/tests/test_statsd.py deleted file mode 100644 index fc0713d63..000000000 --- a/gnocchi/tests/test_statsd.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2016 Red Hat, Inc. -# Copyright © 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import uuid - -import mock - -from gnocchi import indexer -from gnocchi import statsd -from gnocchi.tests import base as tests_base -from gnocchi import utils - - -class TestStatsd(tests_base.TestCase): - - STATSD_USER_ID = str(uuid.uuid4()) - STATSD_PROJECT_ID = str(uuid.uuid4()) - STATSD_ARCHIVE_POLICY_NAME = "medium" - - def setUp(self): - super(TestStatsd, self).setUp() - - self.conf.set_override("resource_id", - str(uuid.uuid4()), "statsd") - self.conf.set_override("creator", - self.STATSD_USER_ID, "statsd") - self.conf.set_override("archive_policy_name", - self.STATSD_ARCHIVE_POLICY_NAME, "statsd") - - self.stats = statsd.Stats(self.conf) - # Replace storage/indexer with correct ones that have been upgraded - self.stats.storage = self.storage - self.stats.indexer = self.index - self.server = statsd.StatsdServer(self.stats) - - def test_flush_empty(self): - self.server.stats.flush() - - @mock.patch.object(utils, 'utcnow') - def _test_gauge_or_ms(self, metric_type, utcnow): - metric_name = "test_gauge_or_ms" - metric_key = metric_name + "|" + metric_type - utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36) - self.server.datagram_received( - ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'), - ("127.0.0.1", 12345)) - self.stats.flush() - - r = self.stats.indexer.get_resource('generic', - self.conf.statsd.resource_id, - with_metrics=True) - - metric = r.get_metric(metric_key) - - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) - - measures = self.stats.storage.get_measures(metric) - self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0) - ], measures) - - utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) - # This one is going to be ignored - self.server.datagram_received( - ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'), - ("127.0.0.1", 12345)) - self.server.datagram_received( - ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'), - ("127.0.0.1", 12345)) - self.stats.flush() - - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) - - measures = self.stats.storage.get_measures(metric) - self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.5), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 2.0) - ], 
measures) - - def test_gauge(self): - self._test_gauge_or_ms("g") - - def test_ms(self): - self._test_gauge_or_ms("ms") - - @mock.patch.object(utils, 'utcnow') - def test_counter(self, utcnow): - metric_name = "test_counter" - metric_key = metric_name + "|c" - utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36) - self.server.datagram_received( - ("%s:1|c" % metric_name).encode('ascii'), - ("127.0.0.1", 12345)) - self.stats.flush() - - r = self.stats.indexer.get_resource('generic', - self.conf.statsd.resource_id, - with_metrics=True) - metric = r.get_metric(metric_key) - self.assertIsNotNone(metric) - - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) - - measures = self.stats.storage.get_measures(metric) - self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures) - - utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37) - self.server.datagram_received( - ("%s:45|c" % metric_name).encode('ascii'), - ("127.0.0.1", 12345)) - self.server.datagram_received( - ("%s:2|c|@0.2" % metric_name).encode('ascii'), - ("127.0.0.1", 12345)) - self.stats.flush() - - self.stats.storage.process_background_tasks( - self.stats.indexer, [str(metric.id)], sync=True) - - measures = self.stats.storage.get_measures(metric) - self.assertEqual([ - (utils.datetime_utc(2015, 1, 7), 86400.0, 28), - (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28), - (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0), - (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)], measures) - - -class TestStatsdArchivePolicyRule(TestStatsd): - STATSD_ARCHIVE_POLICY_NAME = "" - - def setUp(self): - super(TestStatsdArchivePolicyRule, self).setUp() - try: - self.stats.indexer.create_archive_policy_rule( - "statsd", "*", "medium") - except indexer.ArchivePolicyRuleAlreadyExists: - # Created by another test run - pass diff --git a/gnocchi/tests/test_storage.py b/gnocchi/tests/test_storage.py deleted file mode 100644 index 7047f44de..000000000 --- a/gnocchi/tests/test_storage.py +++ /dev/null @@ -1,1001 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
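
The counter expectations in test_counter (test_statsd.py, just above) follow from standard statsd sample-rate scaling: a datagram "name:value|c|@rate" contributes value/rate to the current flush window. A small sketch of that arithmetic; accumulated_counter() is illustrative, not gnocchi API:

    def accumulated_counter(samples):
        # samples: (value, sample_rate) pairs received in one flush window
        return sum(value / rate for value, rate in samples)

    accumulated_counter([(45, 1.0), (2, 0.2)])  # 45 + 2/0.2 == 55.0

which is why the 13:59 one-minute bucket above holds 55.0, and the hourly and daily means over the two one-minute buckets are (1.0 + 55.0) / 2 == 28.
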
-import datetime -import uuid - -import iso8601 -import mock -from oslotest import base -import six.moves - -from gnocchi import archive_policy -from gnocchi import carbonara -from gnocchi import indexer -from gnocchi import storage -from gnocchi.storage import _carbonara -from gnocchi.tests import base as tests_base -from gnocchi.tests import utils as tests_utils -from gnocchi import utils - - -class TestStorageDriver(tests_base.TestCase): - def setUp(self): - super(TestStorageDriver, self).setUp() - # A lot of tests wants a metric, create one - self.metric, __ = self._create_metric() - - def _create_metric(self, archive_policy_name="low"): - m = storage.Metric(uuid.uuid4(), - self.archive_policies[archive_policy_name]) - m_sql = self.index.create_metric(m.id, str(uuid.uuid4()), - archive_policy_name) - return m, m_sql - - def trigger_processing(self, metrics=None): - if metrics is None: - metrics = [str(self.metric.id)] - self.storage.process_background_tasks(self.index, metrics, sync=True) - - def test_get_driver(self): - driver = storage.get_driver(self.conf) - self.assertIsInstance(driver, storage.StorageDriver) - - def test_corrupted_data(self): - if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage): - self.skipTest("This driver is not based on Carbonara") - - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - ]) - self.trigger_processing() - - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1), - ]) - - with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize', - side_effect=carbonara.InvalidData()): - with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize', - side_effect=carbonara.InvalidData()): - self.trigger_processing() - - m = self.storage.get_measures(self.metric) - self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m) - - def test_aborted_initial_processing(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 5), - ]) - with mock.patch.object(self.storage, '_store_unaggregated_timeserie', - side_effect=Exception): - try: - self.trigger_processing() - except Exception: - pass - - with mock.patch('gnocchi.storage._carbonara.LOG') as LOG: - self.trigger_processing() - self.assertFalse(LOG.error.called) - - m = self.storage.get_measures(self.metric) - self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 5.0), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 3600.0, 5.0), m) - self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m) - - def test_list_metric_with_measures_to_process(self): - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.assertEqual(set(), metrics) - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - ]) - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.assertEqual(set([str(self.metric.id)]), metrics) - self.trigger_processing() - metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming) - self.assertEqual(set([]), metrics) - - def test_delete_nonempty_metric(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - ]) - self.trigger_processing() - 
self.storage.delete_metric(self.metric, sync=True) - self.trigger_processing() - self.assertEqual([], self.storage.get_measures(self.metric)) - self.assertRaises(storage.MetricDoesNotExist, - self.storage._get_unaggregated_timeserie, - self.metric) - - def test_delete_nonempty_metric_unprocessed(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - ]) - self.index.delete_metric(self.metric.id) - self.trigger_processing() - __, __, details = self.storage.incoming._build_report(True) - self.assertIn(str(self.metric.id), details) - self.storage.expunge_metrics(self.index, sync=True) - __, __, details = self.storage.incoming._build_report(True) - self.assertNotIn(str(self.metric.id), details) - - def test_delete_expunge_metric(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - ]) - self.trigger_processing() - self.index.delete_metric(self.metric.id) - self.storage.expunge_metrics(self.index, sync=True) - self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric, - self.metric.id) - - def test_measures_reporting(self): - report = self.storage.incoming.measures_report(True) - self.assertIsInstance(report, dict) - self.assertIn('summary', report) - self.assertIn('metrics', report['summary']) - self.assertIn('measures', report['summary']) - self.assertIn('details', report) - self.assertIsInstance(report['details'], dict) - report = self.storage.incoming.measures_report(False) - self.assertIsInstance(report, dict) - self.assertIn('summary', report) - self.assertIn('metrics', report['summary']) - self.assertIn('measures', report['summary']) - self.assertNotIn('details', report) - - def test_add_measures_big(self): - m, __ = self._create_metric('high') - self.storage.incoming.add_measures(m, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, i, j), 100) - for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)]) - self.trigger_processing([str(m.id)]) - - self.assertEqual(3661, len(self.storage.get_measures(m))) - - @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48) - def test_add_measures_update_subset_split(self): - m, m_sql = self._create_metric('medium') - measures = [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) - for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.incoming.add_measures(m, measures) - self.trigger_processing([str(m.id)]) - - # add measure to end, in same aggregate time as last point. - self.storage.incoming.add_measures(m, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1), 100)]) - - with mock.patch.object(self.storage, '_store_metric_measures') as c: - # should only resample last aggregate - self.trigger_processing([str(m.id)]) - count = 0 - for call in c.mock_calls: - # policy is 60 points and split is 48. should only update 2nd half - args = call[1] - if args[0] == m_sql and args[2] == 'mean' and args[3] == 60.0: - count += 1 - self.assertEqual(1, count) - - def test_add_measures_update_subset(self): - m, m_sql = self._create_metric('medium') - measures = [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 6, i, j, 0), 100) - for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)] - self.storage.incoming.add_measures(m, measures) - self.trigger_processing([str(m.id)]) - - # add measure to end, in same aggregate time as last point. 
- new_point = utils.dt_to_unix_ns(2014, 1, 6, 1, 58, 1) - self.storage.incoming.add_measures( - m, [storage.Measure(new_point, 100)]) - - with mock.patch.object(self.storage.incoming, 'add_measures') as c: - self.trigger_processing([str(m.id)]) - for __, args, __ in c.mock_calls: - self.assertEqual( - list(args[3])[0][0], carbonara.round_timestamp( - new_point, args[1].granularity * 10e8)) - - def test_delete_old_measures(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.trigger_processing() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures(self.metric)) - - # One year later… - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69), - ]) - self.trigger_processing() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2015, 1, 1), 86400.0, 69), - (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69), - (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69), - ], self.storage.get_measures(self.metric)) - - self.assertEqual({"1244160000.0"}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 86400.0)) - self.assertEqual({"1412640000.0"}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 3600.0)) - self.assertEqual({"1419120000.0"}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 300.0)) - - def test_rewrite_measures(self): - # Create an archive policy that spans on several splits. Each split - # being 3600 points, let's go for 36k points so we have 10 splits. 
- apname = str(uuid.uuid4()) - ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) - self.index.create_archive_policy(ap) - self.metric = storage.Metric(uuid.uuid4(), ap) - self.index.create_metric(self.metric.id, str(uuid.uuid4()), - apname) - - # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), - ]) - self.trigger_processing() - - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - - if self.storage.WRITE_FULL: - assertCompressedIfWriteFull = self.assertTrue - else: - assertCompressedIfWriteFull = self.assertFalse - - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - # Now store brand new points that should force a rewrite of one of the - # split (keep in mind the back window size in one hour here). We move - # the BoundTimeSerie processing timeserie far away from its current - # range. - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), - ]) - self.trigger_processing() - - self.assertEqual({'1452384000.0', '1451736000.0', - '1451520000.0', '1451952000.0'}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - # Now this one is compressed because it has been rewritten! 
- self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1452384000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - (utils.datetime_utc(2016, 1, 10, 16, 18), 60.0, 45), - (utils.datetime_utc(2016, 1, 10, 17, 12), 60.0, 46), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self): - """See LP#1655422""" - # Create an archive policy that spans several splits. Each split - # being 3600 points, let's go for 36k points so we have 10 splits. - apname = str(uuid.uuid4()) - ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) - self.index.create_archive_policy(ap) - self.metric = storage.Metric(uuid.uuid4(), ap) - self.index.create_metric(self.metric.id, str(uuid.uuid4()), - apname) - - # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), - ]) - self.trigger_processing() - - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - - if self.storage.WRITE_FULL: - assertCompressedIfWriteFull = self.assertTrue - else: - assertCompressedIfWriteFull = self.assertFalse - - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - # Now store brand new points that should force a rewrite of one of the - # splits (keep in mind the back window size is one hour here). We move - # the BoundTimeSerie processing timeserie far away from its current - # range. - - # Here we test a special case where the oldest_mutable_timestamp will - # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
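
The split keys recurring through these rewrite tests are not magic numbers: at a 60-second granularity with 3600 points per split (the default these comments rely on), one split covers 3600 * 60 = 216000 seconds, and a key is simply the measure timestamp floored to a multiple of that span. A quick worked check, assuming naive UTC datetimes; split_key() here is illustrative, not the carbonara API:

    import calendar
    import datetime

    SPLIT_SPAN = 3600 * 60  # seconds covered by one split at 60s granularity

    def split_key(dt):
        ts = calendar.timegm(dt.timetuple())
        return ts - (ts % SPLIT_SPAN)

    split_key(datetime.datetime(2016, 1, 1, 12, 0, 1))    # 1451520000
    split_key(datetime.datetime(2016, 1, 6, 15, 12, 45))  # 1451952000
    split_key(datetime.datetime(2016, 1, 10, 0, 0, 0))    # 1452384000

The last line confirms the comment above: 2016-01-10T00:00:00 falls exactly on a split boundary, which is the special case this test exists for.
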
- self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45), - ]) - self.trigger_processing() - - self.assertEqual({'1452384000.0', '1451736000.0', - '1451520000.0', '1451952000.0'}, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - # Now this one is compressed because it has been rewritten! - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1452384000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - def test_rewrite_measures_corruption_missing_file(self): - # Create an archive policy that spans on several splits. Each split - # being 3600 points, let's go for 36k points so we have 10 splits. - apname = str(uuid.uuid4()) - ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) - self.index.create_archive_policy(ap) - self.metric = storage.Metric(uuid.uuid4(), ap) - self.index.create_metric(self.metric.id, str(uuid.uuid4()), - apname) - - # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), - ]) - self.trigger_processing() - - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - - if self.storage.WRITE_FULL: - assertCompressedIfWriteFull = self.assertTrue - else: - assertCompressedIfWriteFull = self.assertFalse - - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - # Test what happens if we delete the latest split and then need to - # compress it! 
- self.storage._delete_metric_measures(self.metric, - '1451952000.0', - 'mean', 60.0) - - # Now store brand new points that should force a rewrite of one of the - # split (keep in mind the back window size in one hour here). We move - # the BoundTimeSerie processing timeserie far away from its current - # range. - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), - ]) - self.trigger_processing() - - def test_rewrite_measures_corruption_bad_data(self): - # Create an archive policy that spans on several splits. Each split - # being 3600 points, let's go for 36k points so we have 10 splits. - apname = str(uuid.uuid4()) - ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)]) - self.index.create_archive_policy(ap) - self.metric = storage.Metric(uuid.uuid4(), ap) - self.index.create_metric(self.metric.id, str(uuid.uuid4()), - apname) - - # First store some points scattered across different splits - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44), - ]) - self.trigger_processing() - - splits = {'1451520000.0', '1451736000.0', '1451952000.0'} - self.assertEqual(splits, - self.storage._list_split_keys_for_metric( - self.metric, "mean", 60.0)) - - if self.storage.WRITE_FULL: - assertCompressedIfWriteFull = self.assertTrue - else: - assertCompressedIfWriteFull = self.assertFalse - - data = self.storage._get_measures( - self.metric, '1451520000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451736000.0', "mean", 60.0) - self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data)) - data = self.storage._get_measures( - self.metric, '1451952000.0', "mean", 60.0) - assertCompressedIfWriteFull( - carbonara.AggregatedTimeSerie.is_compressed(data)) - - self.assertEqual([ - (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69), - (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42), - (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4), - (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44), - ], self.storage.get_measures(self.metric, granularity=60.0)) - - # Test what happens if we write garbage - self.storage._store_metric_measures( - self.metric, '1451952000.0', "mean", 60.0, b"oh really?") - - # Now store brand new points that should force a rewrite of one of the - # split (keep in mind the back window size in one hour here). We move - # the BoundTimeSerie processing timeserie far away from its current - # range. 
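
The WRITE_FULL conditionals repeated through these rewrite tests encode a single invariant: a split that can no longer change is always stored compressed, while the newest, still-mutable split is compressed up front only by drivers that rewrite whole splits on every update. As a hypothetical one-liner (not gnocchi API):

    def split_should_be_compressed(driver, split_is_immutable):
        # immutable splits: always compressed; the mutable tail split:
        # compressed only when the driver always writes splits in full
        return split_is_immutable or driver.WRITE_FULL

which is exactly the choice the assertCompressedIfWriteFull helper above makes between assertTrue and assertFalse.
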
- self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45), - storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46), - ]) - self.trigger_processing() - - def test_updated_measures(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - ]) - self.trigger_processing() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 55.5), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 55.5), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), - ], self.storage.get_measures(self.metric)) - - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.trigger_processing() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 69), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures(self.metric, aggregation='max')) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 4), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 4), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 4.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures(self.metric, aggregation='min')) - - def test_add_and_get_measures(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.trigger_processing() - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures(self.metric)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures( - self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0))) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0), - ], self.storage.get_measures( - self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0))) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - 
(utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0), - ], self.storage.get_measures( - self.metric, - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10))) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - ], self.storage.get_measures( - self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - ], self.storage.get_measures( - self.metric, - from_timestamp=iso8601.parse_date("2014-1-1 13:00:00+01:00"), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2))) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75), - ], self.storage.get_measures( - self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=3600.0)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0), - ], self.storage.get_measures( - self.metric, - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2), - granularity=300.0)) - - self.assertRaises(storage.GranularityDoesNotExist, - self.storage.get_measures, - self.metric, - granularity=42) - - def test_get_cross_metric_measures_unknown_metric(self): - self.assertEqual([], - self.storage.get_cross_metric_measures( - [storage.Metric(uuid.uuid4(), - self.archive_policies['low']), - storage.Metric(uuid.uuid4(), - self.archive_policies['low'])])) - - def test_get_measure_unknown_aggregation(self): - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.AggregationDoesNotExist, - self.storage.get_measures, - self.metric, aggregation='last') - - def test_get_cross_metric_measures_unknown_aggregation(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.AggregationDoesNotExist, - self.storage.get_cross_metric_measures, - [self.metric, metric2], - aggregation='last') - - def test_get_cross_metric_measures_unknown_granularity(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['low']) - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), 
- storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.assertRaises(storage.GranularityDoesNotExist, - self.storage.get_cross_metric_measures, - [self.metric, metric2], - granularity=12345.456) - - def test_add_and_get_cross_metric_measures_different_archives(self): - metric2 = storage.Metric(uuid.uuid4(), - self.archive_policies['no_granularity_match']) - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - - self.assertRaises(storage.MetricUnaggregatable, - self.storage.get_cross_metric_measures, - [self.metric, metric2]) - - def test_add_and_get_cross_metric_measures(self): - metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44), - ]) - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 41), 2), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 10, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 4), - ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) - - values = self.storage.get_cross_metric_measures([self.metric, metric2]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 12.5), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0) - ], values) - - values = self.storage.get_cross_metric_measures([self.metric, metric2], - reaggregation='max') - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 39.75), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 69), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 23), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 44) - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0), - ], values) - - values = 
self.storage.get_cross_metric_measures( - [self.metric, metric2], - to_timestamp=datetime.datetime(2014, 1, 1, 12, 5, 0)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 24.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1)) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - ], values) - - values = self.storage.get_cross_metric_measures( - [self.metric, metric2], - from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0), - to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 1), - granularity=300.0) - - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - ], values) - - def test_add_and_get_cross_metric_measures_with_holes(self): - metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), - ]) - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2), - ]) - self.trigger_processing([str(self.metric.id), str(metric2.id)]) - - values = self.storage.get_cross_metric_measures([self.metric, metric2]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875), - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0), - (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0), - (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0) - ], values) - - def test_search_value(self): - metric2, __ = self._create_metric() - self.storage.incoming.add_measures(self.metric, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1,), 69), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42), - ]) - - self.storage.incoming.add_measures(metric2, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2), - ]) - 
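
The numbers in the cross-metric tests above come from a two-step computation: each metric is first aggregated under its own archive policy, then the cross-metric step combines the per-metric series point by point (mean by default, or the requested reaggregation). A worked check for the 12:05 five-minute bucket of test_add_and_get_cross_metric_measures:

    m1 = (42 + 4) / 2.0              # metric1's points at 12:07:31 and 12:09:31
    m2 = 2.0                         # metric2's single point at 12:07:41
    assert (m1 + m2) / 2.0 == 12.5   # the (12:05, 300.0, 12.5) row above
    assert max(m1, m2) == 23.0       # the reaggregation='max' row above
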
self.trigger_processing([str(self.metric.id), str(metric2.id)]) - - self.assertEqual( - {metric2: [], - self.metric: [ - (utils.datetime_utc(2014, 1, 1), 86400, 33), - (utils.datetime_utc(2014, 1, 1, 12), 3600, 33), - (utils.datetime_utc(2014, 1, 1, 12), 300, 69), - (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]}, - self.storage.search_value( - [metric2, self.metric], - {u"≥": 30})) - - self.assertEqual( - {metric2: [], self.metric: []}, - self.storage.search_value( - [metric2, self.metric], - {u"∧": [ - {u"eq": 100}, - {u"≠": 50}]})) - - def test_resize_policy(self): - name = str(uuid.uuid4()) - ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)]) - self.index.create_archive_policy(ap) - m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name) - m = self.index.list_metrics(ids=[m.id])[0] - self.storage.incoming.add_measures(m, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 0), 1), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 1), - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 10), 1), - ]) - self.trigger_processing([str(m.id)]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), - ], self.storage.get_measures(m)) - # expand to more points - self.index.update_archive_policy( - name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)]) - m = self.index.list_metrics(ids=[m.id])[0] - self.storage.incoming.add_measures(m, [ - storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 15), 1), - ]) - self.trigger_processing([str(m.id)]) - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), - ], self.storage.get_measures(m)) - # shrink timespan - self.index.update_archive_policy( - name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)]) - m = self.index.list_metrics(ids=[m.id])[0] - self.assertEqual([ - (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0), - (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0), - ], self.storage.get_measures(m)) - - -class TestMeasureQuery(base.BaseTestCase): - def test_equal(self): - q = storage.MeasureQuery({"=": 4}) - self.assertTrue(q(4)) - self.assertFalse(q(40)) - - def test_gt(self): - q = storage.MeasureQuery({">": 4}) - self.assertTrue(q(40)) - self.assertFalse(q(4)) - - def test_and(self): - q = storage.MeasureQuery({"and": [{">": 4}, {"<": 10}]}) - self.assertTrue(q(5)) - self.assertFalse(q(40)) - self.assertFalse(q(1)) - - def test_or(self): - q = storage.MeasureQuery({"or": [{"=": 4}, {"=": 10}]}) - self.assertTrue(q(4)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - - def test_modulo(self): - q = storage.MeasureQuery({"=": [{"%": 5}, 0]}) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(-1)) - self.assertFalse(q(6)) - - def test_math(self): - q = storage.MeasureQuery( - { - u"and": [ - # v+5 is bigger 0 - {u"≥": [{u"+": 5}, 0]}, - # v-6 is not 5 - {u"≠": [5, {u"-": 6}]}, - ], - } - ) - self.assertTrue(q(5)) - self.assertTrue(q(10)) - self.assertFalse(q(11)) - - def test_empty(self): - q = storage.MeasureQuery({}) - self.assertFalse(q(5)) - self.assertFalse(q(10)) - - def test_bad_format(self): - self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"foo": [{"=": 4}, {"=": 10}]}) - - 
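
The operator trees accepted by MeasureQuery, as exercised above, read as a tiny expression language over a single measure value. A rough evaluator sketch, for illustration only; the real operator table in gnocchi/storage is larger and validates its input, as test_bad_format shows:

    import operator

    OPS = {"=": operator.eq, u"≠": operator.ne, "<": operator.lt,
           ">": operator.gt, u"≥": operator.ge, "%": operator.mod,
           "+": operator.add, "-": operator.sub}

    def evaluate(node, value):
        if not isinstance(node, dict):
            return node                      # a literal operand
        (op, operand), = node.items()
        if op == "and":
            return all(evaluate(n, value) for n in operand)
        if op == "or":
            return any(evaluate(n, value) for n in operand)
        if isinstance(operand, list):        # binary form: {"=": [{"%": 5}, 0]}
            left, right = (evaluate(n, value) for n in operand)
            return OPS[op](left, right)
        return OPS[op](value, operand)       # unary form: {">": 4}

    evaluate({"=": [{"%": 5}, 0]}, 10)           # True: 10 % 5 == 0
    evaluate({"and": [{">": 4}, {"<": 10}]}, 5)  # True
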
self.assertRaises(storage.InvalidQuery, - storage.MeasureQuery, - {"=": [1, 2, 3]}) diff --git a/gnocchi/tests/test_utils.py b/gnocchi/tests/test_utils.py deleted file mode 100644 index d90bc2873..000000000 --- a/gnocchi/tests/test_utils.py +++ /dev/null @@ -1,105 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import os -import uuid - -import iso8601 -import mock - -from gnocchi.tests import base as tests_base -from gnocchi import utils - - -class TestUtils(tests_base.TestCase): - def _do_test_datetime_to_unix_timezone_change(self, expected, dt): - self.assertEqual(expected, utils.datetime_to_unix(dt)) - with mock.patch.dict(os.environ, {'TZ': 'UTC'}): - self.assertEqual(expected, utils.datetime_to_unix(dt)) - with mock.patch.dict(os.environ, {'TZ': 'Europe/Paris'}): - self.assertEqual(expected, utils.datetime_to_unix(dt)) - with mock.patch.dict(os.environ, {'TZ': 'US/Eastern'}): - self.assertEqual(expected, utils.datetime_to_unix(dt)) - - def test_datetime_to_unix_timezone_change_utc(self): - dt = datetime.datetime(2015, 1, 1, 10, 0, tzinfo=iso8601.iso8601.UTC) - self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt) - - def test_datetime_to_unix_timezone_change_offset(self): - dt = datetime.datetime(2015, 1, 1, 15, 0, - tzinfo=iso8601.iso8601.FixedOffset(5, 0, '+5h')) - self._do_test_datetime_to_unix_timezone_change(1420106400.0, dt) - - def test_to_timestamps_epoch(self): - self.assertEqual( - utils.to_datetime("1425652440"), - datetime.datetime(2015, 3, 6, 14, 34, - tzinfo=iso8601.iso8601.UTC)) - self.assertEqual( - utils.to_datetime("1425652440.4"), - datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, - tzinfo=iso8601.iso8601.UTC)) - self.assertEqual( - utils.to_datetime(1425652440), - datetime.datetime(2015, 3, 6, 14, 34, - tzinfo=iso8601.iso8601.UTC)) - self.assertEqual( - utils.to_datetime(utils.to_timestamp(1425652440.4)), - datetime.datetime(2015, 3, 6, 14, 34, 0, 400000, - tzinfo=iso8601.iso8601.UTC)) - - -class TestResourceUUID(tests_base.TestCase): - def test_conversion(self): - self.assertEqual( - uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), - utils.ResourceUUID( - uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), - "bar")) - self.assertEqual( - uuid.UUID('ba571521-1de6-5aff-b183-1535fd6eb5d0'), - utils.ResourceUUID("foo", "bar")) - self.assertEqual( - uuid.UUID('4efb21f6-3d19-5fe3-910b-be8f0f727846'), - utils.ResourceUUID("foo", None)) - self.assertEqual( - uuid.UUID('853e5c64-f45e-58b2-999c-96df856fbe3d'), - utils.ResourceUUID("foo", "")) - - -class StopWatchTest(tests_base.TestCase): - def test_no_states(self): - watch = utils.StopWatch() - self.assertRaises(RuntimeError, watch.stop) - - def test_start_stop(self): - watch = utils.StopWatch() - watch.start() - watch.stop() - - def test_no_elapsed(self): - watch = utils.StopWatch() - self.assertRaises(RuntimeError, watch.elapsed) - - def test_elapsed(self): - watch = utils.StopWatch() - watch.start() - watch.stop() - elapsed = watch.elapsed() - 
self.assertAlmostEqual(elapsed, watch.elapsed()) - - def test_context_manager(self): - with utils.StopWatch() as watch: - pass - self.assertGreater(watch.elapsed(), 0) diff --git a/gnocchi/tests/utils.py b/gnocchi/tests/utils.py deleted file mode 100644 index e9b0b3391..000000000 --- a/gnocchi/tests/utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import six - - -def list_all_incoming_metrics(incoming): - return set.union(*[incoming.list_metric_with_measures_to_process(i) - for i in six.moves.range(incoming.NUM_SACKS)]) diff --git a/gnocchi/utils.py b/gnocchi/utils.py deleted file mode 100644 index b7e92263c..000000000 --- a/gnocchi/utils.py +++ /dev/null @@ -1,299 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright © 2015-2017 Red Hat, Inc. -# Copyright © 2015-2016 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import datetime -import distutils.util -import errno -import itertools -import multiprocessing -import numbers -import os -import uuid - -import iso8601 -import monotonic -import numpy -from oslo_log import log -import pandas as pd -import six -import tenacity -from tooz import coordination - -LOG = log.getLogger(__name__) - -# uuid5 namespace for id transformation. -# NOTE(chdent): This UUID must stay the same, forever, across all -# of gnocchi to preserve its value as a URN namespace. -RESOURCE_ID_NAMESPACE = uuid.UUID('0a7a15ff-aa13-4ac2-897c-9bdf30ce175b') - - -def ResourceUUID(value, creator): - if isinstance(value, uuid.UUID): - return value - if '/' in value: - raise ValueError("'/' is not supported in resource id") - try: - return uuid.UUID(value) - except ValueError: - if len(value) <= 255: - if creator is None: - creator = "\x00" - # value/creator must be str (unicode) in Python 3 and str (bytes) - # in Python 2. It's not logical, I know. - if six.PY2: - value = value.encode('utf-8') - creator = creator.encode('utf-8') - return uuid.uuid5(RESOURCE_ID_NAMESPACE, - value + "\x00" + creator) - raise ValueError( - 'transformable resource id >255 max allowed characters') - - -def UUID(value): - try: - return uuid.UUID(value) - except Exception as e: - raise ValueError(e) - - -# Retry with exponential backoff for up to 1 minute -retry = tenacity.retry( - wait=tenacity.wait_exponential(multiplier=0.5, max=60), - # Never retry except when explicitly asked by raising TryAgain - retry=tenacity.retry_never, - reraise=True) - - -# TODO(jd) Move this to tooz? 
-@retry -def _enable_coordination(coord): - try: - coord.start(start_heart=True) - except Exception as e: - LOG.error("Unable to start coordinator: %s", e) - raise tenacity.TryAgain(e) - - -def get_coordinator_and_start(url): - my_id = uuid.uuid4().bytes - coord = coordination.get_coordinator(url, my_id) - _enable_coordination(coord) - return coord, my_id - - -unix_universal_start64 = numpy.datetime64("1970") - - -def to_timestamps(values): - try: - values = list(values) - if isinstance(values[0], numbers.Real): - times = pd.to_datetime(values, utc=True, box=False, unit='s') - elif (isinstance(values[0], datetime.datetime) or - is_valid_timestamp(values[0])): - times = pd.to_datetime(values, utc=True, box=False) - else: - try: - float(values[0]) - except ValueError: - times = (utcnow() + pd.to_timedelta(values)).values - else: - times = pd.to_datetime(list(map(float, values)), - utc=True, box=False, unit='s') - except ValueError: - raise ValueError("Unable to convert timestamps") - - if (times < unix_universal_start64).any(): - raise ValueError('Timestamp must be after Epoch') - - return times - - -def is_valid_timestamp(value): - try: - pd.to_datetime(value) - except Exception: - return False - return True - - -def to_timestamp(value): - return to_timestamps((value,))[0] - - -def to_datetime(value): - return timestamp_to_datetime(to_timestamp(value)) - - -def timestamp_to_datetime(v): - return datetime.datetime.utcfromtimestamp( - v.astype(float) / 10e8).replace(tzinfo=iso8601.iso8601.UTC) - - -def to_timespan(value): - if value is None: - raise ValueError("Invalid timespan") - try: - seconds = float(value) - except Exception: - try: - seconds = pd.to_timedelta(value).total_seconds() - except Exception: - raise ValueError("Unable to parse timespan") - if seconds <= 0: - raise ValueError("Timespan must be positive") - return datetime.timedelta(seconds=seconds) - - -def utcnow(): - """Version of utcnow() that returns utcnow with a correct TZ.""" - return datetime.datetime.now(tz=iso8601.iso8601.UTC) - - -def normalize_time(timestamp): - """Normalize time in arbitrary timezone to UTC naive object.""" - offset = timestamp.utcoffset() - if offset is None: - return timestamp - return timestamp.replace(tzinfo=None) - offset - - -def datetime_utc(*args): - return datetime.datetime(*args, tzinfo=iso8601.iso8601.UTC) - - -unix_universal_start = datetime_utc(1970, 1, 1) - - -def datetime_to_unix(timestamp): - return (timestamp - unix_universal_start).total_seconds() - - -def dt_to_unix_ns(*args): - return int(datetime_to_unix(datetime.datetime( - *args, tzinfo=iso8601.iso8601.UTC)) * int(10e8)) - - -def dt_in_unix_ns(timestamp): - return int(datetime_to_unix(timestamp) * int(10e8)) - - -def get_default_workers(): - try: - default_workers = multiprocessing.cpu_count() or 1 - except NotImplementedError: - default_workers = 1 - return default_workers - - -def grouper(iterable, n): - it = iter(iterable) - while True: - chunk = tuple(itertools.islice(it, n)) - if not chunk: - return - yield chunk - - -def ensure_paths(paths): - for p in paths: - try: - os.makedirs(p) - except OSError as e: - if e.errno != errno.EEXIST: - raise - - -def strtobool(v): - if isinstance(v, bool): - return v - return bool(distutils.util.strtobool(v)) - - -class StopWatch(object): - """A simple timer/stopwatch helper class. - - Inspired by: apache-commons-lang java stopwatch. - - Not thread-safe (when a single watch is mutated by multiple threads at - the same time). 
Thread-safe when used by a single thread (not shared) or - when operations are performed in a thread-safe manner on these objects by - wrapping those operations with locks. - - It will use the `monotonic`_ pypi library to find an appropriate - monotonically increasing time providing function (which typically varies - depending on operating system and python version). - - .. _monotonic: https://pypi.python.org/pypi/monotonic/ - """ - _STARTED = object() - _STOPPED = object() - - def __init__(self): - self._started_at = None - self._stopped_at = None - self._state = None - - def start(self): - """Starts the watch (if not already started). - - NOTE(harlowja): resets any splits previously captured (if any). - """ - if self._state == self._STARTED: - return self - self._started_at = monotonic.monotonic() - self._state = self._STARTED - return self - - @staticmethod - def _delta_seconds(earlier, later): - # Uses max to avoid the delta/time going backwards (and thus negative). - return max(0.0, later - earlier) - - def elapsed(self): - """Returns how many seconds have elapsed.""" - if self._state not in (self._STARTED, self._STOPPED): - raise RuntimeError("Can not get the elapsed time of a stopwatch" - " if it has not been started/stopped") - if self._state == self._STOPPED: - elapsed = self._delta_seconds(self._started_at, self._stopped_at) - else: - elapsed = self._delta_seconds( - self._started_at, monotonic.monotonic()) - return elapsed - - def __enter__(self): - """Starts the watch.""" - self.start() - return self - - def __exit__(self, type, value, traceback): - """Stops the watch (ignoring errors if stop fails).""" - try: - self.stop() - except RuntimeError: - pass - - def stop(self): - """Stops the watch.""" - if self._state == self._STOPPED: - return self - if self._state != self._STARTED: - raise RuntimeError("Can not stop a stopwatch that has not been" - " started") - self._stopped_at = monotonic.monotonic() - self._state = self._STOPPED - return self diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml b/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml deleted file mode 100644 index 2f8338087..000000000 --- a/releasenotes/notes/add-parameter-granularity-7f22c677dc1b1238.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - Allow searching for values in metrics using - one or more granularities. diff --git a/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml b/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml deleted file mode 100644 index 682a4e4c4..000000000 --- a/releasenotes/notes/archive_policy_bool-9313cae7122c4a2f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - >- - A new archive policy named *bool* is provided by default. It provides a - cheap and easy way to store boolean measures (0 and 1). diff --git a/releasenotes/notes/auth_type_option-c335b219afba5569.yaml b/releasenotes/notes/auth_type_option-c335b219afba5569.yaml deleted file mode 100644 index 537278641..000000000 --- a/releasenotes/notes/auth_type_option-c335b219afba5569.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - >- - The new `auth_type` option specifies which authentication system to use for - the REST API. Its default is still `noauth`.
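As an aside to the *bool* policy note above, here is a minimal sketch (not part of this patch) of creating a metric that uses it over the REST API. It is modeled on the POST /v1/metric call made in tools/duration_perf_test.py later in this patch; the URL, port, and basic-auth header are assumptions borrowed from defaults used elsewhere in this tree.

import json
import requests

GNOCCHI_URL = "http://localhost:8041"  # default used by the perf tools below
HEADERS = {
    "Authorization": "basic YWRtaW46",  # "admin:" in base64, as in run-func-tests.sh
    "Content-Type": "application/json",
}

# Ask for the cheap boolean policy; measures are then expected to be 0 or 1.
resp = requests.post(GNOCCHI_URL + "/v1/metric",
                     data=json.dumps({"archive_policy_name": "bool"}),
                     headers=HEADERS)
resp.raise_for_status()
print(resp.json()["id"])  # UUID of the newly created metric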
diff --git a/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml b/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml deleted file mode 100644 index f198eb8af..000000000 --- a/releasenotes/notes/auth_type_pluggable-76a3c73cac8eec6a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - >- - The REST API authentication mechanism is now pluggable. You can write your - own plugin to specify how segregation and policy should be enforced. diff --git a/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml b/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml deleted file mode 100644 index cdfeee45d..000000000 --- a/releasenotes/notes/backfill-cross-aggregation-2de54c7c30b2eb67.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Add support for backfilling timestamps with missing points in a subset of - timeseries when computing aggregation across multiple metrics. Users can - specify a `fill` value as either a float or `null`. A granularity - must be specified in addition to `fill`. diff --git a/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml b/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml deleted file mode 100644 index afccc58bb..000000000 --- a/releasenotes/notes/batch_resource_measures_create_metrics-f73790a8475ad628.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - "When sending measures in batch for resources, it is now possible to pass - `create_metric=true` in the query parameters so that missing metrics are created. - This only works if an archive policy rule matches the named metrics." diff --git a/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml b/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml deleted file mode 100644 index d053330b5..000000000 --- a/releasenotes/notes/ceph-omap-34e069dfb3df764d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -upgrade: - - The Ceph driver has moved the storage of measure metadata - from the xattr API to the omap API. Already-created measures are migrated - during the gnocchi-upgrade run. diff --git a/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml b/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml deleted file mode 100644 index 2dfe37dea..000000000 --- a/releasenotes/notes/ceph-read-async-ca2f7512c6842adb.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - The Ceph driver now uses the rados asynchronous API to retrieve - measures to process in parallel. diff --git a/releasenotes/notes/creator_field-6b715c917f6afc93.yaml b/releasenotes/notes/creator_field-6b715c917f6afc93.yaml deleted file mode 100644 index e9b3bfd1b..000000000 --- a/releasenotes/notes/creator_field-6b715c917f6afc93.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -deprecations: - - >- - The `created_by_user_id` and `created_by_project_id` fields are now - deprecated and merged into a unique `creator` field. The old fields - are still returned and managed by the API for now. diff --git a/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml b/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml deleted file mode 100644 index 0f6b04214..000000000 --- a/releasenotes/notes/delete-resources-f10d21fc02f53f16.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - A new REST API call is provided to delete multiple resources at once using a search filter.
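To illustrate the `create_metric=true` note above, a hedged sketch of the batch call follows. The CLI equivalent used by run-upgrade-tests.sh later in this patch is `gnocchi measures batch-resources-metrics`; the exact REST path, metric name, and headers below are assumptions for illustration, not confirmed by this patch.

import json
import requests

GNOCCHI_URL = "http://localhost:8041"  # assumed local endpoint
HEADERS = {"Authorization": "basic YWRtaW46",  # "admin:" base64, as in run-func-tests.sh
           "Content-Type": "application/json"}

# One resource, one named metric, one measure. With create_metric=true the
# server may create "cpu_util" on the fly, provided an archive policy rule
# matches that metric name.
payload = {"5a301761-aaaa-46e2-8900-8b4f6fe6675a": {
    "cpu_util": [{"timestamp": "2017-01-01T12:00:00", "value": 12.0}]}}

resp = requests.post(GNOCCHI_URL + "/v1/batch/resources/metrics/measures",  # assumed path
                     params={"create_metric": "true"},
                     data=json.dumps(payload), headers=HEADERS)
resp.raise_for_status()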
diff --git a/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml b/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml deleted file mode 100644 index 635097c63..000000000 --- a/releasenotes/notes/deprecate-noauth-01b7e961d9a17e9e.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -deprecations: - - The `noauth` authentication mechanism is deprecated and will be removed in - a future version. diff --git a/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml b/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml deleted file mode 100644 index b2c5167be..000000000 --- a/releasenotes/notes/dynamic-resampling-b5e545b1485c152f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - Add a `resample` parameter to support resampling a stored time series to - another granularity, not necessarily one in the existing archive policy. If both - resampling and reaggregation parameters are specified, resampling will - occur prior to reaggregation. diff --git a/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml b/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml deleted file mode 100644 index bab5e73a9..000000000 --- a/releasenotes/notes/fnmatch-python-2.7-c524ce1e1b238b0a.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - | - A workaround for a Python 2.7 bug in `fnmatch` has been removed. Make sure - you use at least Python 2.7.9 to avoid running into it. diff --git a/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml b/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml deleted file mode 100644 index 5999cb7f3..000000000 --- a/releasenotes/notes/forbid-slash-b3ec2bc77cc34b49.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - \'/\' in resource ids and metric names used to be accepted by mistake: - they could be POSTed but not GETed/PATCHed/DELETEd. This character is now forbidden - in resource ids and metric names, and the REST API returns 400 if it is present. - Metric names and resource ids that already contain a \'/\' have it replaced - by \'_\'. diff --git a/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml b/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml deleted file mode 100644 index 73af05f2a..000000000 --- a/releasenotes/notes/gnocchi_config_generator-0fc337ba8e3afd5f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - >- - The `gnocchi-config-generator` program can now generate a default - configuration file, usable as a template for custom tweaking. diff --git a/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml b/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml deleted file mode 100644 index 5e28af9c8..000000000 --- a/releasenotes/notes/healthcheck-middleware-81c2f0d02ebdb5cc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - A healthcheck endpoint is provided by default at /healthcheck. It leverages - the oslo_middleware healthcheck middleware and allows retrieving information - about the health of the API service. diff --git a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml b/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml deleted file mode 100644 index c2cf17ffd..000000000 --- a/releasenotes/notes/incoming-sacks-413f4818882ab83d.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -features: - - | - New measures are now sharded into sacks to better distribute data across - the storage driver as well as to allow improved scheduling of the aggregation - workload. -upgrade: - - | - The storage driver needs to be upgraded.
The number of sacks to distribute - across can be configured by passing a ``num-storage-sacks`` - value on upgrade. A default number of sacks will be created if not set. - This can also be reconfigured post-upgrade by using the - ``gnocchi-change-sack-size`` CLI. See the documentation for hints on the number - of sacks to set for your environment and for upgrade notes. diff --git a/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml b/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml deleted file mode 100644 index a213d3e38..000000000 --- a/releasenotes/notes/lighten-default-archive-policies-455561c027edf4ad.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - The default archive policies "low" and "medium" are now storing less data - than they used to. They use only 1 and 2 archive policy definitions - respectively, which speeds up their computation by 66% and 33%. diff --git a/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml b/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml deleted file mode 100644 index 579c835db..000000000 --- a/releasenotes/notes/mysql_precise_datetime-57f868f3f42302e2.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -other: - - Gnocchi now leverages the microsecond timestamps available since MySQL 5.6.4, - which is therefore now the minimum required version of MySQL. diff --git a/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml b/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml deleted file mode 100644 index 004ef170d..000000000 --- a/releasenotes/notes/noauth-force-headers-dda926ce83f810e8.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - >- - The `noauth` authentication mode now requires the `X-User-Id` and/or - `X-Project-Id` headers to be present. diff --git a/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml b/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml deleted file mode 100644 index 0aaffc38e..000000000 --- a/releasenotes/notes/noauth-keystone-compat-e8f760591d593f07.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -upgrade: - - >- - The `auth_type` option has a new default value set to "basic". This mode - does not do any segregation and uses the standard HTTP `Authorization` - header for authentication. The old "noauth" authentication mechanism based - on the Keystone headers (`X-User-Id`, `X-Creator-Id` and `X-Roles`) and the - Keystone segregation rules, which was the default up to Gnocchi 3.0, is - still available. diff --git a/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml b/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml deleted file mode 100644 index 9098b81fd..000000000 --- a/releasenotes/notes/pecan-debug-removed-1a9dbc4a0a6ad581.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -upgrade: - - The `api.pecan_debug` option has been removed. diff --git a/releasenotes/notes/redis-driver-299dc443170364bc.yaml b/releasenotes/notes/redis-driver-299dc443170364bc.yaml deleted file mode 100644 index b8214f272..000000000 --- a/releasenotes/notes/redis-driver-299dc443170364bc.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - | - A Redis driver has been introduced for storing incoming measures and - computed timeseries.
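Since the "basic" mode note above says authentication is just the standard HTTP `Authorization` header, a small client-side sketch may help. The `admin` user with an empty password mirrors the `GNOCCHI_AUTHORIZATION="basic YWRtaW46"` export in run-func-tests.sh later in this patch; the endpoint is an assumption.

import base64
import requests

user = "admin"
# "basic <base64(user + ':')>" -- empty password, as in the test scripts.
token = base64.b64encode((user + ":").encode("utf-8")).decode("ascii")
headers = {"Authorization": "basic " + token}

# Any authenticated call then works the same way, e.g. listing generic resources.
resp = requests.get("http://localhost:8041/v1/resource/generic",
                    headers=headers)
print(resp.status_code)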
diff --git a/releasenotes/notes/reloading-734a639a667c93ee.yaml b/releasenotes/notes/reloading-734a639a667c93ee.yaml deleted file mode 100644 index 0cf2eb730..000000000 --- a/releasenotes/notes/reloading-734a639a667c93ee.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - gnocchi-metricd now uses the cotyledon/oslo.config helper to handle - configuration file reloading. You can dynamically change the number - of workers by changing the configuration file and sending SIGHUP to the - metricd master process. diff --git a/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml b/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml deleted file mode 100644 index 4d6e0f875..000000000 --- a/releasenotes/notes/remove-legacy-ceilometer-resources-16da2061d6d3f506.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -deprecations: - - The creation of the legacy Ceilometer resource types has been removed. diff --git a/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml b/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml deleted file mode 100644 index 75ff241a6..000000000 --- a/releasenotes/notes/removed-median-and-95pct-from-default-aggregation-methods-2f5ec059855e17f9.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -other: - - The default archive policies list no longer contains the 95pct and median - aggregation methods. These are the least used methods, and removing them - should make gnocchi-metricd faster by more than 25% in the default scenario. diff --git a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml b/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml deleted file mode 100644 index a837c72da..000000000 --- a/releasenotes/notes/resource-type-patch-8b6a85009db0671c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - |- - A new REST API endpoint has been added to update a - resource type: "PATCH /v1/resource-type/foobar". The expected payload is in - RFC 6902 format. Some examples can be found in the documentation. diff --git a/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml b/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml deleted file mode 100644 index a91c8176c..000000000 --- a/releasenotes/notes/resource-type-required-attributes-f446c220d54c8eb7.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - When updating a resource type attribute, it is now possible to pass a - 'fill' option for each attribute to fill existing resources. - - required=True is now supported when updating a resource type. This requires - the 'fill' option to be set. diff --git a/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml b/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml deleted file mode 100644 index 1dba0232e..000000000 --- a/releasenotes/notes/s3-bucket-limit-224951bb6a81ddce.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - | - Previously, the S3 storage driver stored aggregates in one bucket per metric. - This would quickly run into the bucket limit set by S3. The S3 storage driver - is fixed so it stores all aggregates for all metrics in a single bucket. - Buckets previously created by Gnocchi will need to be deleted as they will - no longer be handled.
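Tying together the RFC 6902 note and the 'fill' option note above, here is a speculative example of such a PATCH. The attribute name and the exact "options"/"fill" layout are assumptions; the canonical examples live in the documentation mentioned in the note.

import json
import requests

# RFC 6902 payload: add a new required string attribute and backfill
# existing resources of this type with a default value.
patch = [{
    "op": "add",
    "path": "/attributes/display_name",  # hypothetical attribute
    "value": {"type": "string", "required": True,
              "options": {"fill": "unknown"}},  # assumed option layout
}]

resp = requests.patch(
    "http://localhost:8041/v1/resource-type/foobar",
    data=json.dumps(patch),
    headers={"Authorization": "basic YWRtaW46",
             "Content-Type": "application/json-patch+json"})  # assumed media type
resp.raise_for_status()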
diff --git a/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml b/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml deleted file mode 100644 index 5b5426ee2..000000000 --- a/releasenotes/notes/s3_consistency_check_timeout-a30db3bd07a9a281.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - | - The S3 driver now checks for data consistency by default. S3 does not - guarantee read-after-write consistency when overwriting data. Gnocchi now - waits up to `s3_check_consistency_timeout` seconds before returning and - unlocking a metric for new processing. This makes sure that the data - read by the next workers is consistent and that no data will - be lost. This feature can be disabled by setting the value to 0. diff --git a/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml b/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml deleted file mode 100644 index 535c6d1e7..000000000 --- a/releasenotes/notes/s3_driver-4b30122bdbe0385d.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - New storage driver for AWS S3. - This new driver works in the same way as the Swift driver, except that it - leverages the Amazon Web Services S3 object storage API. diff --git a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml b/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml deleted file mode 100644 index cb2ef22a8..000000000 --- a/releasenotes/notes/storage-engine-v3-b34bd0723abf292f.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- -features: - - The Carbonara based storage engine has been updated and greatly improved. - It now features fast writes for Ceph (no change for the file and Swift based - drivers) by using an append method. - It also features on-the-fly data compression (using LZ4) of the aggregated - time series, reducing the data space usage by at least 50%. -upgrade: - - gnocchi-upgrade must be run before running the new version of - gnocchi-metricd and the HTTP REST API in order to upgrade from version 2 of - the Carbonara storage engine to version 3. It will read all metrics and - convert them to the new version 3 serialization format (compressing the - data), which might take some time. diff --git a/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml b/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml deleted file mode 100644 index f1d63bb66..000000000 --- a/releasenotes/notes/storage-incoming-586b3e81de8deb4f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - New measures awaiting processing by *metricd* can now be stored using a - different storage driver. By default, the driver used is still the - configured regular storage driver. See the `[incoming]` section - in the configuration file. diff --git a/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml b/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml deleted file mode 100644 index 9a52e062b..000000000 --- a/releasenotes/notes/swift_keystone_v3-606da8228fc13a32.yaml +++ /dev/null @@ -1,3 +0,0 @@ ---- -features: - - The Swift driver now supports authentication with the Keystone v3 API. diff --git a/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml b/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml deleted file mode 100644 index bd0480ca5..000000000 --- a/releasenotes/notes/upgrade-code-removal-from-2.2-and-3.0-a01fc64ecb39c327.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -upgrade: - - | - The storage upgrade is only supported from version 3.1.
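The LZ4 compression mentioned in the storage-engine-v3 note above is easy to demonstrate in isolation. This sketch assumes a python-lz4 release providing the lz4.block API (setup.cfg later in this patch pins lz4>=0.9.0); the actual Carbonara serialization format is internal and not shown here.

import lz4.block
import numpy

# Stand-in for a serialized aggregated timeseries: 10 000 float64 values.
# A constant series compresses extremely well; real aggregates vary more.
points = numpy.full(10000, 42.0).tobytes()

compressed = lz4.block.compress(points)      # prepends the original size
restored = lz4.block.decompress(compressed)  # reads that size back

assert restored == points
print("ratio: %.3f" % (len(compressed) / float(len(points))))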
diff --git a/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml b/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml deleted file mode 100644 index ec6b6c518..000000000 --- a/releasenotes/notes/uuid5-change-8a8c467d2b2d4c85.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -issues: - - >- - The conversion mechanism provided by the API to convert non-UUID resource - id to UUID is now also based on the user creating/accessing the resource. - This makes sure that the conversion generates a unique UUID for the user - and that several users can use the same string as `original_resource_id`. -upgrade: - - >- - Since `original_resource_id` is now unique per creator, that means users - cannot refer to resource by using the `original_resource_id` if the - resource was not created by them. diff --git a/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml b/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml deleted file mode 100644 index d2739ec71..000000000 --- a/releasenotes/notes/wsgi-script-deprecation-c6753a844ca0b411.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -deprecations: - - | - The custom gnocchi/rest/app.wsgi is now deprecated, the gnocchi-api binary - should be used as wsgi script file. For example, with uwsgi "--wsgi-file - /usr/lib/python2.7/gnocchi/rest/app.wsgi" should be replaced by - "--wsgi-file /usr/bin/gnocchi-api". diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index e06a0ecf7..000000000 --- a/requirements.txt +++ /dev/null @@ -1,24 +0,0 @@ -pbr -numpy>=1.9.0 -iso8601 -oslo.config>=3.22.0 -oslo.log>=2.3.0 -oslo.policy>=0.3.0 -oslo.middleware>=3.22.0 -pandas>=0.18.0 -scipy>=0.18.1 # BSD -pecan>=0.9 -futures -jsonpatch -cotyledon>=1.5.0 -six -stevedore -ujson -voluptuous -werkzeug -trollius; python_version < '3.4' -tenacity>=3.1.0 # Apache-2.0 -WebOb>=1.4.1 -Paste -PasteDeploy -monotonic diff --git a/run-func-tests.sh b/run-func-tests.sh deleted file mode 100755 index cf28931d5..000000000 --- a/run-func-tests.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -x -set -e - -cleanup(){ - type -t indexer_stop >/dev/null && indexer_stop || true - type -t storage_stop >/dev/null && storage_stop || true -} -trap cleanup EXIT - -GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} -GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} -for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do - for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS}; do - case $storage in - ceph) - eval $(pifpaf -e STORAGE run ceph) - rados -c $STORAGE_CEPH_CONF mkpool gnocchi - STORAGE_URL=ceph://$STORAGE_CEPH_CONF - ;; - s3) - if ! 
which s3rver >/dev/null 2>&1 - then - mkdir -p npm-s3rver - export NPM_CONFIG_PREFIX=npm-s3rver - npm install s3rver --global - export PATH=$PWD/npm-s3rver/bin:$PATH - fi - eval $(pifpaf -e STORAGE run s3rver) - ;; - file) - STORAGE_URL=file:// - ;; - - swift|redis) - eval $(pifpaf -e STORAGE run $storage) - ;; - *) - echo "Unsupported storage backend by functional tests: $storage" - exit 1 - ;; - esac - - eval $(pifpaf -e INDEXER run $indexer) - - export GNOCCHI_SERVICE_TOKEN="" # Just make gabbi happy - export GNOCCHI_AUTHORIZATION="basic YWRtaW46" # admin in base64 - export OS_TEST_PATH=gnocchi/tests/functional_live - pifpaf -e GNOCCHI run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL --coordination-driver redis -- ./tools/pretty_tox.sh $* - - cleanup - done -done diff --git a/run-tests.sh b/run-tests.sh deleted file mode 100755 index 0e6d11f8b..000000000 --- a/run-tests.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -x -set -e -GNOCCHI_TEST_STORAGE_DRIVERS=${GNOCCHI_TEST_STORAGE_DRIVERS:-file} -GNOCCHI_TEST_INDEXER_DRIVERS=${GNOCCHI_TEST_INDEXER_DRIVERS:-postgresql} -for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS} -do - export GNOCCHI_TEST_STORAGE_DRIVER=$storage - for indexer in ${GNOCCHI_TEST_INDEXER_DRIVERS} - do - case $GNOCCHI_TEST_STORAGE_DRIVER in - ceph|redis) - pifpaf run $GNOCCHI_TEST_STORAGE_DRIVER -- pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* - ;; - s3) - if ! which s3rver >/dev/null 2>&1 - then - mkdir npm-s3rver - export NPM_CONFIG_PREFIX=npm-s3rver - npm install s3rver --global - export PATH=$PWD/npm-s3rver/bin:$PATH - fi - pifpaf -e GNOCCHI_STORAGE run s3rver -- \ - pifpaf -e GNOCCHI_INDEXER run $indexer -- \ - ./tools/pretty_tox.sh $* - ;; - *) - pifpaf -g GNOCCHI_INDEXER_URL run $indexer -- ./tools/pretty_tox.sh $* - ;; - esac - done -done diff --git a/run-upgrade-tests.sh b/run-upgrade-tests.sh deleted file mode 100755 index be2d188b5..000000000 --- a/run-upgrade-tests.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash -set -e - -export GNOCCHI_DATA=$(mktemp -d -t gnocchi.XXXX) - -GDATE=$((which gdate >/dev/null && echo gdate) || echo date) - -old_version=$(pip freeze | sed -n '/gnocchi==/s/.*==\(.*\)/\1/p') - -RESOURCE_IDS=( - "5a301761-aaaa-46e2-8900-8b4f6fe6675a" - "5a301761-bbbb-46e2-8900-8b4f6fe6675a" - "5a301761-cccc-46e2-8900-8b4f6fe6675a" - "non-uuid" -) - -dump_data(){ - dir="$1" - mkdir -p $dir - echo "* Dumping measures aggregations to $dir" - gnocchi resource list -c id -c type -c project_id -c user_id -c original_resource_id -c started_at -c ended_at -c revision_start -c revision_end | tee $dir/resources.list - for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do - for agg in min max mean sum ; do - gnocchi measures show --aggregation $agg --resource-id $resource_id metric > $dir/${agg}.txt - done - done -} - -inject_data() { - echo "* Injecting measures in Gnocchi" - # TODO(sileht): Generate better data that ensure we have enought split that cover all - # situation - - for resource_id in ${RESOURCE_IDS[@]}; do - gnocchi resource create generic --attribute id:$resource_id -n metric:high > /dev/null - done - - { - measures_sep="" - MEASURES=$(for i in $(seq 0 10 288000); do - now=$($GDATE --iso-8601=s -d "-${i}minute") ; value=$((RANDOM % 13 + 52)) - echo -n "$measures_sep {\"timestamp\": \"$now\", \"value\": $value }" - measures_sep="," - done) - echo -n '{' - resource_sep="" - for resource_id in ${RESOURCE_IDS[@]} $RESOURCE_ID_EXT; do - echo -n "$resource_sep \"$resource_id\": { \"metric\": [ $MEASURES ] }" - 
resource_sep="," - done - echo -n '}' - } | gnocchi measures batch-resources-metrics - - - echo "* Waiting for measures computation" - while [ $(gnocchi status -f value -c "storage/total number of measures to process") -gt 0 ]; do sleep 1 ; done -} - -pifpaf_stop(){ - : -} - -cleanup(){ - pifpaf_stop - rm -rf $GNOCCHI_DATA -} -trap cleanup EXIT - - -if [ "$STORAGE_DAEMON" == "ceph" ]; then - rados -c $STORAGE_CEPH_CONF mkpool gnocchi - STORAGE_URL=ceph://$STORAGE_CEPH_CONF -else - STORAGE_URL=file://$GNOCCHI_DATA -fi - -eval $(pifpaf run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) -export OS_AUTH_TYPE=gnocchi-basic -export GNOCCHI_USER=$GNOCCHI_USER_ID -original_statsd_resource_id=$GNOCCHI_STATSD_RESOURCE_ID -inject_data $GNOCCHI_DATA -dump_data $GNOCCHI_DATA/old -pifpaf_stop - -new_version=$(python setup.py --version) -echo "* Upgrading Gnocchi from $old_version to $new_version" -pip install -q -U .[${GNOCCHI_VARIANT}] - -eval $(pifpaf --debug run gnocchi --indexer-url $INDEXER_URL --storage-url $STORAGE_URL) -# Gnocchi 3.1 uses basic auth by default -export OS_AUTH_TYPE=gnocchi-basic -export GNOCCHI_USER=$GNOCCHI_USER_ID - -# pifpaf creates a new statsd resource on each start -gnocchi resource delete $GNOCCHI_STATSD_RESOURCE_ID - -dump_data $GNOCCHI_DATA/new - -echo "* Checking output difference between Gnocchi $old_version and $new_version" -diff -uNr $GNOCCHI_DATA/old $GNOCCHI_DATA/new diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 6675c97bf..000000000 --- a/setup.cfg +++ /dev/null @@ -1,158 +0,0 @@ -[metadata] -name = gnocchi -url = http://launchpad.net/gnocchi -summary = Metric as a Service -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://gnocchi.xyz -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3.5 - Topic :: System :: Monitoring - -[extras] -keystone = - keystonemiddleware>=4.0.0 -mysql = - pymysql - oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 - sqlalchemy - sqlalchemy-utils - alembic>=0.7.6,!=0.8.1,!=0.9.0 -postgresql = - psycopg2 - oslo.db>=4.8.0,!=4.13.1,!=4.13.2,!=4.15.0 - sqlalchemy - sqlalchemy-utils - alembic>=0.7.6,!=0.8.1,!=0.9.0 -s3 = - boto3 - botocore>=1.5 - lz4>=0.9.0 - tooz>=1.38 -redis = - redis>=2.10.0 # MIT - lz4>=0.9.0 - tooz>=1.38 -swift = - python-swiftclient>=3.1.0 - lz4>=0.9.0 - tooz>=1.38 -ceph = - lz4>=0.9.0 - tooz>=1.38 -ceph_recommended_lib = - cradox>=1.0.9 -ceph_alternative_lib = - python-rados>=10.1.0 # not available on pypi -file = - lz4>=0.9.0 - tooz>=1.38 -doc = - sphinx<1.6.0 - sphinx_rtd_theme - sphinxcontrib-httpdomain - PyYAML - Jinja2 - reno>=1.6.2 -test = - pifpaf>=1.0.1 - gabbi>=1.30.0 - coverage>=3.6 - fixtures - mock - oslotest - python-subunit>=0.0.18 - os-testr - testrepository - testscenarios - testresources>=0.2.4 # Apache-2.0/BSD - testtools>=0.9.38 - WebTest>=2.0.16 - doc8 - tooz>=1.38 - keystonemiddleware>=4.0.0 - wsgi_intercept>=1.4.1 -test-swift = - python-swiftclient - -[global] -setup-hooks = - pbr.hooks.setup_hook - -[build_py] -pre-hook.build_config = gnocchi.genconfig.prehook - -[files] -packages = - gnocchi - -[entry_points] -gnocchi.indexer.sqlalchemy.resource_type_attribute = - string = 
gnocchi.indexer.sqlalchemy_extension:StringSchema - uuid = gnocchi.indexer.sqlalchemy_extension:UUIDSchema - number = gnocchi.indexer.sqlalchemy_extension:NumberSchema - bool = gnocchi.indexer.sqlalchemy_extension:BoolSchema - -gnocchi.storage = - swift = gnocchi.storage.swift:SwiftStorage - ceph = gnocchi.storage.ceph:CephStorage - file = gnocchi.storage.file:FileStorage - s3 = gnocchi.storage.s3:S3Storage - redis = gnocchi.storage.redis:RedisStorage - -gnocchi.incoming = - ceph = gnocchi.storage.incoming.ceph:CephStorage - file = gnocchi.storage.incoming.file:FileStorage - swift = gnocchi.storage.incoming.swift:SwiftStorage - s3 = gnocchi.storage.incoming.s3:S3Storage - redis = gnocchi.storage.incoming.redis:RedisStorage - -gnocchi.indexer = - mysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer - mysql+pymysql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer - postgresql = gnocchi.indexer.sqlalchemy:SQLAlchemyIndexer - -gnocchi.aggregates = - moving-average = gnocchi.aggregates.moving_stats:MovingAverage - -gnocchi.rest.auth_helper = - noauth = gnocchi.rest.auth_helper:NoAuthHelper - keystone = gnocchi.rest.auth_helper:KeystoneAuthHelper - basic = gnocchi.rest.auth_helper:BasicAuthHelper - -console_scripts = - gnocchi-config-generator = gnocchi.cli:config_generator - gnocchi-upgrade = gnocchi.cli:upgrade - gnocchi-change-sack-size = gnocchi.cli:change_sack_size - gnocchi-statsd = gnocchi.cli:statsd - gnocchi-metricd = gnocchi.cli:metricd - -wsgi_scripts = - gnocchi-api = gnocchi.rest.app:build_wsgi_app - -oslo.config.opts = - gnocchi = gnocchi.opts:list_opts - -oslo.config.opts.defaults = - gnocchi = gnocchi.opts:set_defaults - -tempest.test_plugins = - gnocchi_tests = gnocchi.tempest.plugin:GnocchiTempestPlugin - -[build_sphinx] -all_files = 1 -build-dir = doc/build -source-dir = doc/source - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py deleted file mode 100755 index b96f524bb..000000000 --- a/setup.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import setuptools - -setuptools.setup( - setup_requires=['pbr'], - pbr=True) diff --git a/tools/duration_perf_analyse.py b/tools/duration_perf_analyse.py deleted file mode 100644 index a6e35ad9f..000000000 --- a/tools/duration_perf_analyse.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# -# Tools to analyse the result of multiple call of duration_perf_test.py: -# -# $ clients=10 -# $ parallel --progress -j $clients python duration_perf_test.py \ -# --result myresults/client{} ::: $(seq 0 $clients) -# $ python duration_perf_analyse.py myresults -# * get_measures: -# Time -# count 1000.000000 -# mean 0.032090 -# std 0.028287 -# ... -# - - -import argparse -import os - -import pandas - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('result', - help=('Path of the results of perf_tool.py.'), - default='result') - - data = { - 'get_measures': [], - 'write_measures': [], - 'write_metric': [], - } - args = parser.parse_args() - for root, dirs, files in os.walk(args.result): - for name in files: - for method in data: - if name.endswith('_%s.csv' % method): - datum = data[method] - filepath = os.path.join(root, name) - datum.append(pandas.read_csv(filepath)) - cname = name.replace('_%s.csv' % method, '') - datum[-1].rename(columns={'Duration': cname}, inplace=True) - - for method in data: - merged = pandas.DataFrame(columns=['Index', 'Duration']) - append = pandas.DataFrame(columns=['Duration']) - for datum in data[method]: - datum.dropna(axis=1, inplace=True) - datum.drop('Count', axis=1, inplace=True) - merged = merged.merge(datum, on='Index') - cname = datum.columns.values[1] - datum.rename(columns={cname: 'Duration'}, inplace=True) - append = append.append(datum.drop('Index', axis=1)) - merged.to_csv(os.path.join(args.result, '%s_merged.csv' % method), - index=False) - print("* %s:" % method) - print(append.describe()) - print("") - -if __name__ == '__main__': - main() diff --git a/tools/duration_perf_test.py b/tools/duration_perf_test.py deleted file mode 100644 index 275cb05c3..000000000 --- a/tools/duration_perf_test.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Tools to measure the duration of a get and a write request, can be used like: -# -# $ python duration_perf_test.py -# -# or to simulate multiple clients workload: -# -# $ clients=10 -# $ parallel --progress -j $clients python duration_perf_test.py \ -# --result myresults/client{} ::: $(seq 0 $clients) -# $ python duration_perf_analyse.py myresults -# * get_measures: -# Time -# count 1000.000000 -# mean 0.032090 -# std 0.028287 -# ... 
-# - -import argparse -import datetime -import json -import os -import random -import time - -from keystoneclient.v2_0 import client as keystone_client -import requests - - -def timer(func): - def inner(self, index, *args, **kwargs): - start = time.time() - count = func(self, index, *args, **kwargs) - elapsed = time.time() - start - self._timers.setdefault(func.__name__, []).append( - (index, elapsed, count) - ) - print(("{name} #{index} processed " - "{count} objects in {elapsed} sec").format( - name=func.__name__, - index=index, - count=count or 0, - elapsed=elapsed)) - return count - return inner - - -class PerfTools(object): - def __init__(self, args): - self.args = args - self.keystone = keystone_client.Client( - username=args.username, - password=args.password, - tenant_name=args.tenant_name, - auth_url=args.auth_url) - self.headers = {'X-Auth-Token': self.keystone.auth_token, - 'Content-Type': 'application/json'} - self._metrics = [] - self._timers = {} - self.timestamp = datetime.datetime.utcnow() - - @timer - def write_metric(self, index): - data = json.dumps({"archive_policy_name": self.args.archive_policy}) - resp = requests.post(self.args.gnocchi_url + "/v1/metric", - data=data, headers=self.headers) - try: - self._metrics.append(json.loads(resp.content)["id"]) - except Exception: - raise RuntimeError("Can't continue without all metrics created " - "(%s)" % resp.content) - - @timer - def write_measures(self, index, metric): - data = [] - for i in range(self.args.batch_size): - self.timestamp += datetime.timedelta(minutes=1) - data.append({'timestamp': self.timestamp.isoformat(), - 'value': 100}) - resp = requests.post( - "%s/v1/metric/%s/measures" % (self.args.gnocchi_url, metric), - data=json.dumps(data), - headers=self.headers) - if resp.status_code / 100 != 2: - print('Failed POST request to measures #%d: %s' % (index, - resp.content)) - return 0 - return self.args.batch_size - - @timer - def get_measures(self, index, metric): - resp = requests.get( - "%s/v1/metric/%s/measures" % (self.args.gnocchi_url, metric), - headers=self.headers) - try: - return len(json.loads(resp.content)) - except Exception: - print('Failed GET request to measures #%d: %s' % (index, - resp.content)) - return 0 - - def _get_random_metric(self): - return self._metrics[random.randint(0, len(self._metrics) - 1)] - - def run(self): - try: - for index in range(self.args.metric_count): - self.write_metric(index) - - for index in range(self.args.measure_count): - metric = self._get_random_metric() - self.write_measures(index, metric) - self.get_measures(index, metric) - finally: - self.dump_logs() - - def dump_logs(self): - for name, data in self._timers.items(): - filepath = "%s_%s.csv" % (self.args.result_path, name) - dirpath = os.path.dirname(filepath) - if dirpath and not os.path.exists(dirpath): - os.makedirs(dirpath) - with open(filepath, 'w') as f: - f.write("Index,Duration,Count\n") - for meter in data: - f.write("%s\n" % ",".join("%.2f" % (m if m else 0) - for m in meter)) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--metric-count", - help=('Number of metrics to be created. 
' - 'metrics are created one by one.'), - default=100, - type=int) - parser.add_argument("--measure-count", - help='Number of measures batches to be sent.', - default=100, - type=int) - parser.add_argument("--gnocchi-url", - help='Gnocchi API URL to use.', - default="http://localhost:8041") - parser.add_argument("--archive-policy", - help='Archive policy to use.', - default="low") - parser.add_argument("--os-username", - dest='username', - help='User name to use for OpenStack service access.', - default="admin") - parser.add_argument("--os-tenant-name", - dest='tenant_name', - help=('Tenant name to use for ' - 'OpenStack service access.'), - default="admin") - parser.add_argument("--os-password", - dest='password', - help='Password to use for OpenStack service access.', - default="password") - parser.add_argument("--os-auth-url", - dest='auth_url', - help='Auth URL to use for OpenStack service access.', - default="http://localhost:5000/v2.0") - parser.add_argument("--result", - help='path prefix to write results to.', - dest='result_path', - default="./perf_gnocchi") - parser.add_argument("--batch-size", - dest='batch_size', - help='Number of measurements in the batch.', - default=100, - type=int) - PerfTools(parser.parse_args()).run() - -if __name__ == '__main__': - main() diff --git a/tools/gnocchi-archive-policy-size.py b/tools/gnocchi-archive-policy-size.py deleted file mode 100755 index f3fbe7840..000000000 --- a/tools/gnocchi-archive-policy-size.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (c) 2016 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -from gnocchi import utils - - -WORST_CASE_BYTES_PER_POINT = 8.04 - - -if (len(sys.argv) - 1) % 2 != 0: - print("Usage: %s ... " - % sys.argv[0]) - sys.exit(1) - - -def sizeof_fmt(num, suffix='B'): - for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'): - if abs(num) < 1024.0: - return "%3.1f%s%s" % (num, unit, suffix) - num /= 1024.0 - return "%.1f%s%s" % (num, 'Yi', suffix) - - -size = 0 -for g, t in utils.grouper(sys.argv[1:], 2): - granularity = utils.to_timespan(g) - timespan = utils.to_timespan(t) - points = timespan.total_seconds() / granularity.total_seconds() - cursize = points * WORST_CASE_BYTES_PER_POINT - size += cursize - print("%s over %s = %d points = %s" % (g, t, points, sizeof_fmt(cursize))) - -print("Total: " + sizeof_fmt(size)) diff --git a/tools/measures_injector.py b/tools/measures_injector.py deleted file mode 100755 index ebaef5201..000000000 --- a/tools/measures_injector.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2016 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import random -import uuid - -from concurrent import futures -from oslo_config import cfg -import six - -from gnocchi import indexer -from gnocchi import service -from gnocchi import storage -from gnocchi import utils - - -def injector(): - conf = cfg.ConfigOpts() - conf.register_cli_opts([ - cfg.IntOpt("metrics", default=1, min=1), - cfg.StrOpt("archive-policy-name", default="low"), - cfg.StrOpt("creator", default="admin"), - cfg.IntOpt("batch-of-measures", default=1000), - cfg.IntOpt("measures-per-batch", default=10), - ]) - conf = service.prepare_service(conf=conf) - index = indexer.get_driver(conf) - index.connect() - s = storage.get_driver(conf) - - def todo(): - metric = index.create_metric( - uuid.uuid4(), - creator=conf.creator, - archive_policy_name=conf.archive_policy_name) - - for _ in six.moves.range(conf.batch_of_measures): - measures = [ - storage.Measure( - utils.dt_in_unix_ns(utils.utcnow()), random.random()) - for __ in six.moves.range(conf.measures_per_batch)] - s.incoming.add_measures(metric, measures) - - with futures.ThreadPoolExecutor(max_workers=conf.metrics) as executor: - for m in six.moves.range(conf.metrics): - executor.submit(todo) - - -if __name__ == '__main__': - injector() diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh deleted file mode 100755 index 799ac1848..000000000 --- a/tools/pretty_tox.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail - -TESTRARGS=$1 - -# --until-failure is not compatible with --subunit see: -# -# https://bugs.launchpad.net/testrepository/+bug/1411804 -# -# this workaround exists until that is addressed -if [[ "$TESTRARGS" =~ "until-failure" ]]; then - python setup.py testr --slowest --testr-args="$TESTRARGS" -else - python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f -fi diff --git a/tools/travis-ci-setup.dockerfile b/tools/travis-ci-setup.dockerfile deleted file mode 100644 index be2179bcd..000000000 --- a/tools/travis-ci-setup.dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM ubuntu:16.04 -ENV GNOCCHI_SRC /home/tester/src -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update -y && apt-get install -qy \ - locales \ - git \ - wget \ - nodejs \ - nodejs-legacy \ - npm \ - python \ - python3 \ - python-dev \ - python3-dev \ - python-pip \ - redis-server \ - build-essential \ - libffi-dev \ - libpq-dev \ - postgresql \ - mysql-client \ - mysql-server \ - librados-dev \ - liberasurecode-dev \ - ceph \ - && apt-get clean -y - -#NOTE(sileht): really no utf-8 in 2017 !?
-ENV LANG en_US.UTF-8 -RUN update-locale -RUN locale-gen $LANG - -#NOTE(sileht): Upgrade python dev tools -RUN pip install -U pip tox virtualenv - -RUN useradd -ms /bin/bash tester -RUN mkdir $GNOCCHI_SRC -RUN chown -R tester: $GNOCCHI_SRC -USER tester -WORKDIR $GNOCCHI_SRC diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 415d5e6a7..000000000 --- a/tox.ini +++ /dev/null @@ -1,139 +0,0 @@ -[tox] -minversion = 2.4 -envlist = py{35,27}-{postgresql,mysql}{,-file,-swift,-ceph,-s3},pep8,bashate - -[testenv] -usedevelop = True -sitepackages = False -passenv = LANG OS_DEBUG OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE GNOCCHI_TEST_* AWS_* -setenv = - GNOCCHI_TEST_STORAGE_DRIVER=file - GNOCCHI_TEST_INDEXER_DRIVER=postgresql - GNOCCHI_TEST_STORAGE_DRIVERS=file swift ceph s3 redis - GNOCCHI_TEST_INDEXER_DRIVERS=postgresql mysql - file: GNOCCHI_TEST_STORAGE_DRIVERS=file - swift: GNOCCHI_TEST_STORAGE_DRIVERS=swift - ceph: GNOCCHI_TEST_STORAGE_DRIVERS=ceph - redis: GNOCCHI_TEST_STORAGE_DRIVERS=redis - s3: GNOCCHI_TEST_STORAGE_DRIVERS=s3 - postgresql: GNOCCHI_TEST_INDEXER_DRIVERS=postgresql - mysql: GNOCCHI_TEST_INDEXER_DRIVERS=mysql - - GNOCCHI_STORAGE_DEPS=file,swift,test-swift,s3,ceph,ceph_recommended_lib,redis - ceph: GNOCCHI_STORAGE_DEPS=ceph,ceph_recommended_lib - swift: GNOCCHI_STORAGE_DEPS=swift,test-swift - file: GNOCCHI_STORAGE_DEPS=file - redis: GNOCCHI_STORAGE_DEPS=redis - s3: GNOCCHI_STORAGE_DEPS=s3 - - # FIXME(sileht): pbr doesn't support url in setup.cfg extras, so we do this crap - GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift - ceph: GNOCCHI_TEST_TARBALLS= - swift: GNOCCHI_TEST_TARBALLS=http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift - s3: GNOCCHI_TEST_TARBALLS= - redis: GNOCCHI_TEST_TARBALLS= - file: GNOCCHI_TEST_TARBALLS= -deps = .[test] - postgresql: .[postgresql,{env:GNOCCHI_STORAGE_DEPS}] - mysql: .[mysql,{env:GNOCCHI_STORAGE_DEPS}] - {env:GNOCCHI_TEST_TARBALLS:} -# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt -commands = - doc8 --ignore-path doc/source/rest.rst doc/source - gnocchi-config-generator - {toxinidir}/run-tests.sh {posargs} - {toxinidir}/run-func-tests.sh {posargs} - -[testenv:py35-postgresql-file-upgrade-from-3.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released -envdir = upgrade -recreate = True -skip_install = True -usedevelop = False -setenv = GNOCCHI_VARIANT=test,postgresql,file -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - alembic<0.9.0 - pifpaf>=0.13 - gnocchiclient>=2.8.0 -commands = pifpaf --env-prefix INDEXER run postgresql {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:py27-mysql-ceph-upgrade-from-3.1] -# We should always recreate since the script upgrade -# Gnocchi we can't reuse the virtualenv -# FIXME(sileht): We set alembic version until next Gnocchi 3.1 is released -envdir = upgrade -recreate = True -skip_install = True -usedevelop = False -setenv = GNOCCHI_VARIANT=test,mysql,ceph,ceph_recommended_lib -deps = gnocchi[{env:GNOCCHI_VARIANT}]>=3.1,<3.2 - alembic<0.9.0 - gnocchiclient>=2.8.0 - pifpaf>=0.13 -commands = pifpaf --env-prefix INDEXER run mysql -- pifpaf --env-prefix STORAGE run ceph {toxinidir}/run-upgrade-tests.sh {posargs} - -[testenv:bashate] -deps = bashate -commands = bashate -v devstack/plugin.sh devstack/gate/gate_hook.sh devstack/gate/post_test_hook.sh -whitelist_externals = bash - 
-[testenv:pep8] -deps = hacking>=0.12,<0.13 -commands = flake8 - -[testenv:py27-gate] -setenv = OS_TEST_PATH=gnocchi/tests/functional_live - GABBI_LIVE=1 -passenv = {[testenv]passenv} GNOCCHI_SERVICE* GNOCCHI_AUTHORIZATION -sitepackages = True -basepython = python2.7 -commands = {toxinidir}/tools/pretty_tox.sh '{posargs}' - -# This target provides a shortcut to running just the gabbi tests. -[testenv:py27-gabbi] -deps = .[test,postgresql,file] -setenv = OS_TEST_PATH=gnocchi/tests/functional -basepython = python2.7 -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {toxinidir}/tools/pretty_tox.sh '{posargs}' - -[testenv:py27-cover] -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py testr --coverage --testr-args="{posargs}" - -[testenv:venv] -# This is used by the doc job on the gate -deps = {[testenv:docs]deps} -commands = pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- {posargs} - -[flake8] -exclude = .tox,.eggs,doc -show-source = true -enable-extensions = H904 - -[testenv:genconfig] -deps = .[mysql,postgresql,test,file,ceph,swift,s3] -commands = gnocchi-config-generator - -[testenv:docs] -basepython = python2.7 -## This does not work, see: https://github.com/tox-dev/tox/issues/509 -# deps = {[testenv]deps} -# .[postgresql,doc] -# setenv = GNOCCHI_STORAGE_DEPS=file -deps = .[test,file,postgresql,doc] -commands = doc8 --ignore-path doc/source/rest.rst doc/source - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- python setup.py build_sphinx -W - -[testenv:docs-gnocchi.xyz] -basepython = python2.7 -setenv = GNOCCHI_STORAGE_DEPS=file -deps = {[testenv:docs]deps} - sphinxcontrib-versioning -# for 2.x doc - pytimeparse - retrying -# for 3.x doc - oslosphinx -commands = - pifpaf -g GNOCCHI_INDEXER_URL run postgresql -- sphinx-versioning build doc/source doc/build/html