From ba4f572fd3014b7b74f6c90ad6f3bc901e1cc152 Mon Sep 17 00:00:00 2001
From: Julien Danjou
Date: Thu, 28 Apr 2016 16:17:09 -0500
Subject: [PATCH] Remove code that is not related to events storage and API

Change-Id: I63128835613eb5959244c2fd34bc266ddcf4251c
---
 .gitignore | 3 -
 README.rst | 3 -
 ceilometer/agent/__init__.py | 0
 ceilometer/agent/discovery/__init__.py | 0
 ceilometer/agent/discovery/endpoint.py | 46 -
 ceilometer/agent/discovery/localnode.py | 21 -
 ceilometer/agent/discovery/tenant.py | 32 -
 ceilometer/agent/manager.py | 509 ---
 ceilometer/agent/plugin_base.py | 270 --
 ceilometer/api/app.py | 8 +-
 ceilometer/api/controllers/v2/base.py | 18 -
 ceilometer/api/controllers/v2/capabilities.py | 31 +-
 ceilometer/api/controllers/v2/meters.py | 505 ---
 ceilometer/api/controllers/v2/query.py | 359 ---
 ceilometer/api/controllers/v2/resources.py | 157 -
 ceilometer/api/controllers/v2/root.py | 165 -
 ceilometer/api/controllers/v2/samples.py | 145 -
 ceilometer/api/controllers/v2/utils.py | 266 --
 ceilometer/api/hooks.py | 46 +-
 ceilometer/cmd/agent_notification.py | 29 -
 ceilometer/cmd/collector.py | 29 -
 ceilometer/cmd/polling.py | 84 -
 ceilometer/cmd/sample.py | 93 -
 ceilometer/cmd/storage.py | 14 +-
 ceilometer/collector.py | 184 --
 ceilometer/compute/__init__.py | 0
 ceilometer/compute/discovery.py | 87 -
 ceilometer/compute/notifications/__init__.py | 41 -
 ceilometer/compute/notifications/instance.py | 89 -
 ceilometer/compute/pollsters/__init__.py | 77 -
 ceilometer/compute/pollsters/cpu.py | 93 -
 ceilometer/compute/pollsters/disk.py | 694 -----
 ceilometer/compute/pollsters/instance.py | 33 -
 ceilometer/compute/pollsters/memory.py | 110 -
 ceilometer/compute/pollsters/net.py | 210 --
 ceilometer/compute/pollsters/util.py | 96 -
 ceilometer/compute/util.py | 66 -
 ceilometer/compute/virt/__init__.py | 0
 ceilometer/compute/virt/hyperv/__init__.py | 0
 ceilometer/compute/virt/hyperv/inspector.py | 159 -
 ceilometer/compute/virt/inspector.py | 315 --
 ceilometer/compute/virt/libvirt/__init__.py | 0
 ceilometer/compute/virt/libvirt/inspector.py | 228 --
 ceilometer/compute/virt/vmware/__init__.py | 0
 ceilometer/compute/virt/vmware/inspector.py | 199 --
 .../compute/virt/vmware/vsphere_operations.py | 230 --
 ceilometer/compute/virt/xenapi/__init__.py | 0
 ceilometer/compute/virt/xenapi/inspector.py | 192 --
 ceilometer/coordination.py | 229 --
 ceilometer/declarative.py | 188 --
 ceilometer/dispatcher/__init__.py | 70 +-
 ceilometer/dispatcher/database.py | 57 +-
 ceilometer/dispatcher/file.py | 85 -
 ceilometer/dispatcher/gnocchi.py | 469 ---
 ceilometer/dispatcher/http.py | 118 -
 ceilometer/energy/__init__.py | 0
 ceilometer/energy/kwapi.py | 124 -
 ceilometer/event/converter.py | 294 --
 ceilometer/event/endpoint.py | 67 -
 ceilometer/event/storage/impl_mongodb.py | 29 +-
 ceilometer/event/storage/impl_sqlalchemy.py | 9 +-
 ceilometer/event/trait_plugins.py | 230 --
 ceilometer/exchange_control.py | 47 -
 ceilometer/hardware/__init__.py | 0
 ceilometer/hardware/discovery.py | 98 -
 ceilometer/hardware/inspector/__init__.py | 26 -
 ceilometer/hardware/inspector/base.py | 47 -
 ceilometer/hardware/inspector/snmp.py | 313 --
 ceilometer/hardware/pollsters/__init__.py | 0
 ceilometer/hardware/pollsters/data/snmp.yaml | 189 --
 ceilometer/hardware/pollsters/generic.py | 218 --
 ceilometer/hardware/pollsters/util.py | 63 -
 ceilometer/image/__init__.py | 0
 ceilometer/image/glance.py | 129 -
 ceilometer/ipmi/__init__.py | 0
 ceilometer/ipmi/notifications/__init__.py | 0
 ceilometer/ipmi/notifications/ironic.py | 174 --
 ceilometer/ipmi/platform/__init__.py | 0
 ceilometer/ipmi/platform/exception.py | 22 -
 .../ipmi/platform/intel_node_manager.py | 342 --
 ceilometer/ipmi/platform/ipmi_sensor.py | 113 -
 ceilometer/ipmi/platform/ipmitool.py | 132 -
 ceilometer/ipmi/pollsters/__init__.py | 29 -
 ceilometer/ipmi/pollsters/node.py | 180 --
 ceilometer/ipmi/pollsters/sensor.py | 130 -
 ceilometer/keystone_client.py | 78 -
 .../de/LC_MESSAGES/ceilometer-log-error.po | 138 -
 .../de/LC_MESSAGES/ceilometer-log-info.po | 145 -
 .../de/LC_MESSAGES/ceilometer-log-warning.po | 125 -
 .../locale/de/LC_MESSAGES/ceilometer.po | 522 ----
 .../es/LC_MESSAGES/ceilometer-log-error.po | 132 -
 .../es/LC_MESSAGES/ceilometer-log-info.po | 139 -
 .../locale/es/LC_MESSAGES/ceilometer.po | 511 ---
 .../locale/fr/LC_MESSAGES/ceilometer.po | 516 ----
 .../locale/it/LC_MESSAGES/ceilometer.po | 505 ---
 .../locale/ja/LC_MESSAGES/ceilometer.po | 506 ---
 .../ko_KR/LC_MESSAGES/ceilometer-log-error.po | 135 -
 .../ko_KR/LC_MESSAGES/ceilometer-log-info.po | 128 -
 .../LC_MESSAGES/ceilometer-log-warning.po | 155 -
 .../locale/ko_KR/LC_MESSAGES/ceilometer.po | 484 ---
 .../locale/pt_BR/LC_MESSAGES/ceilometer.po | 492 ---
 .../locale/ru/LC_MESSAGES/ceilometer.po | 495 ---
 .../locale/zh_CN/LC_MESSAGES/ceilometer.po | 465 ---
 .../locale/zh_TW/LC_MESSAGES/ceilometer.po | 455 ---
 ceilometer/messaging.py | 88 -
 ceilometer/meter/__init__.py | 0
 ceilometer/meter/data/meters.yaml | 815 -----
 ceilometer/meter/notifications.py | 230 --
 ceilometer/middleware.py | 71 -
 ceilometer/network/__init__.py | 0
 ceilometer/network/floatingip.py | 62 -
 ceilometer/network/notifications.py | 258 --
 ceilometer/network/services/__init__.py | 0
 ceilometer/network/services/base.py | 48 -
 ceilometer/network/services/discovery.py | 118 -
 ceilometer/network/services/fwaas.py | 94 -
 ceilometer/network/services/lbaas.py | 464 ---
 ceilometer/network/services/vpnaas.py | 104 -
 ceilometer/network/statistics/__init__.py | 100 -
 ceilometer/network/statistics/driver.py | 29 -
 ceilometer/network/statistics/flow.py | 53 -
 .../statistics/opencontrail/__init__.py | 0
 .../network/statistics/opencontrail/client.py | 129 -
 .../network/statistics/opencontrail/driver.py | 199 --
 .../statistics/opendaylight/__init__.py | 0
 .../network/statistics/opendaylight/client.py | 240 --
 .../network/statistics/opendaylight/driver.py | 448 ---
 ceilometer/network/statistics/port.py | 109 -
 ceilometer/network/statistics/switch.py | 25 -
 ceilometer/network/statistics/table.py | 46 -
 ceilometer/neutron_client.py | 418 ---
 ceilometer/notification.py | 340 --
 ceilometer/nova_client.py | 171 -
 ceilometer/objectstore/__init__.py | 0
 ceilometer/objectstore/rgw.py | 210 --
 ceilometer/objectstore/rgw_client.py | 72 -
 ceilometer/objectstore/swift.py | 202 --
 ceilometer/opts.py | 97 +-
 ceilometer/pipeline.py | 866 ------
 ceilometer/publisher/__init__.py | 48 -
 ceilometer/publisher/direct.py | 59 -
 ceilometer/publisher/file.py | 104 -
 ceilometer/publisher/http.py | 137 -
 ceilometer/publisher/kafka_broker.py | 96 -
 ceilometer/publisher/messaging.py | 221 --
 ceilometer/publisher/test.py | 43 -
 ceilometer/publisher/udp.py | 74 -
 ceilometer/publisher/utils.py | 143 -
 ceilometer/sample.py | 109 -
 ceilometer/service.py | 52 -
 ceilometer/service_base.py | 153 -
 ceilometer/storage/__init__.py | 96 +-
 ceilometer/storage/base.py | 204 --
 ceilometer/storage/hbase/migration.py | 103 -
 ceilometer/storage/hbase/utils.py | 143 -
 ceilometer/storage/impl_hbase.py | 439 ---
 ceilometer/storage/impl_log.py | 131 -
 ceilometer/storage/impl_mongodb.py | 679 ----
 ceilometer/storage/impl_sqlalchemy.py | 822 -----
 ceilometer/storage/models.py | 148 -
 ceilometer/storage/mongo/utils.py | 377 ---
 ceilometer/storage/pymongo_base.py | 178 --
 .../storage/sqlalchemy/migrate_repo/README | 4 -
 .../sqlalchemy/migrate_repo/__init__.py | 0
 .../storage/sqlalchemy/migrate_repo/manage.py | 5 -
 .../sqlalchemy/migrate_repo/migrate.cfg | 25 -
 .../versions/001_add_meter_table.py | 95 -
 .../versions/002_remove_duration.py | 23 -
 .../versions/003_set_utf8_charset.py | 29 -
 .../versions/004_add_counter_unit.py | 23 -
 .../versions/005_remove_resource_timestamp.py | 24 -
 .../versions/006_counter_volume_is_float.py | 25 -
 .../versions/007_add_alarm_table.py | 46 -
 .../migrate_repo/versions/008_add_events.py | 60 -
 .../versions/009_event_strings.py | 24 -
 .../versions/010_add_index_to_meter.py | 23 -
 .../versions/011_indexes_cleanup.py | 37 -
 .../versions/012_add_missing_foreign_keys.py | 58 -
 .../013_rename_counter_to_meter_alarm.py | 23 -
 .../versions/014_add_event_message_id.py | 44 -
 .../versions/015_add_alarm_history_table.py | 63 -
 .../versions/016_simpler_alarm.py | 60 -
 ...onvert_timestamp_as_datetime_to_decimal.py | 54 -
 .../018_resource_resource_metadata_is_text.py | 26 -
 .../019_alarm_history_detail_is_text.py | 26 -
 .../versions/020_add_metadata_tables.py | 68 -
 .../versions/021_add_event_types.py | 77 -
 .../versions/021_sqlite_upgrade.sql | 29 -
 .../versions/022_metadata_int_is_bigint.py | 26 -
 .../versions/023_add_trait_types.py | 86 -
 .../versions/023_sqlite_upgrade.sql | 34 -
 .../024_event_use_floatingprecision.py | 56 -
 .../025_alarm_use_floatingprecision.py | 58 -
 .../migrate_repo/versions/026_float_size.py | 24 -
 .../027_remove_alarm_fk_constraints.py | 42 -
 .../versions/028_alembic_migrations.py | 138 -
 .../versions/029_sample_recorded_at.py | 24 -
 .../versions/030_rename_meter_table.py | 110 -
 .../versions/031_add_new_meter_table.py | 87 -
 .../032_add_alarm_time_constraints.py | 23 -
 .../versions/033_alarm_id_rename.py | 21 -
 .../versions/034_drop_dump_tables.py | 33 -
 .../versions/035_drop_user_project_tables.py | 84 -
 .../036_drop_sourceassoc_resource_tables.py | 68 -
 .../versions/037_sample_index_cleanup.py | 44 -
 .../versions/038_normalise_tables.py | 131 -
 .../039_event_floatingprecision_pgsql.py | 56 -
 .../versions/040_add_alarm_severity.py | 24 -
 .../versions/041_expand_event_traits.py | 54 -
 .../versions/042_add_raw_column.py | 21 -
 .../versions/043_reduce_uuid_data_types.py | 19 -
 .../044_restore_long_uuid_data_types.py | 37 -
 .../045_add_resource_metadatahash_index.py | 21 -
 .../migrate_repo/versions/__init__.py | 0
 ceilometer/storage/sqlalchemy/migration.py | 29 -
 ceilometer/storage/sqlalchemy/models.py | 150 +-
 ceilometer/storage/sqlalchemy/utils.py | 131 -
 ceilometer/telemetry/__init__.py | 0
 ceilometer/telemetry/notifications.py | 66 -
 ceilometer/tests/base.py | 17 -
 ceilometer/tests/db.py | 41 +-
 ceilometer/tests/functional/api/__init__.py | 4 -
 .../functional/api/v2/test_acl_scenarios.py | 115 -
 .../functional/api/v2/test_api_upgrade.py | 148 -
 .../tests/functional/api/v2/test_app.py | 8 -
 .../functional/api/v2/test_capabilities.py | 2 +-
 .../api/v2/test_complex_query_scenarios.py | 314 --
 ..._compute_duration_by_resource_scenarios.py | 193 --
 .../api/v2/test_list_meters_scenarios.py | 797 -----
 .../api/v2/test_list_resources_scenarios.py | 585 ----
 .../api/v2/test_list_samples_scenarios.py | 156 -
 .../api/v2/test_post_samples_scenarios.py | 367 ---
 .../api/v2/test_statistics_scenarios.py | 1693 ----------
 ceilometer/tests/functional/gabbi/fixtures.py | 53 +-
 .../functional/gabbi/gabbi_pipeline.yaml | 19 -
 .../gabbi/gabbits/capabilities.yaml | 1 -
 .../gabbi/gabbits/clean-samples.yaml | 104 -
 .../gabbi/gabbits/fixture-samples.yaml | 18 -
 .../functional/gabbi/gabbits/meters.yaml | 401 ---
 .../gabbi/gabbits/resources-empty.yaml | 59 -
 .../gabbi/gabbits/resources-fixtured.yaml | 86 -
 .../functional/gabbi/gabbits/samples.yaml | 155 -
 .../gabbi/gabbits_prefix/clean-samples.yaml | 51 -
 .../gabbits_prefix/resources-fixtured.yaml | 24 -
 .../tests/functional/publisher/test_direct.py | 99 -
 .../functional/storage/test_impl_hbase.py | 103 -
 .../tests/functional/storage/test_impl_log.py | 29 -
 .../functional/storage/test_impl_mongodb.py | 60 +-
 .../storage/test_impl_sqlalchemy.py | 115 -
 .../functional/storage/test_pymongo_base.py | 145 -
 .../storage/test_storage_scenarios.py | 2740 -----------------
 ceilometer/tests/functional/test_bin.py | 86 -
 ceilometer/tests/functional/test_collector.py | 248 --
 .../tests/functional/test_notification.py | 614 ----
 ceilometer/tests/integration/__init__.py | 0
 .../tests/integration/gabbi/__init__.py | 0
 .../gabbi/gabbits-live/autoscaling.yaml | 175 --
 .../gabbi/gabbits-live/create_stack.json | 67 -
 .../gabbi/gabbits-live/update_stack.json | 66 -
 .../integration/gabbi/test_gabbi_live.py | 40 -
 .../tests/integration/hooks/post_test_hook.sh | 95 -
 ceilometer/tests/pipeline_base.py | 2157 -------------
 ceilometer/tests/tempest/__init__.py | 0
 ceilometer/tests/tempest/api/__init__.py | 0
 ceilometer/tests/tempest/api/base.py | 162 -
 .../api/test_telemetry_notification_api.py | 87 -
 ceilometer/tests/tempest/config.py | 43 -
 ceilometer/tests/tempest/plugin.py | 44 -
 ceilometer/tests/tempest/scenario/__init__.py | 0
 ...est_object_storage_telemetry_middleware.py | 146 -
 ceilometer/tests/tempest/service/__init__.py | 0
 ceilometer/tests/tempest/service/client.py | 193 --
 ceilometer/tests/unit/agent/__init__.py | 0
 ceilometer/tests/unit/agent/agentbase.py | 738 -----
 ceilometer/tests/unit/agent/test_discovery.py | 108 -
 ceilometer/tests/unit/agent/test_manager.py | 499 ---
 ceilometer/tests/unit/agent/test_plugin.py | 60 -
 ceilometer/tests/unit/api/test_hooks.py | 35 -
 .../tests/unit/api/v2/test_complex_query.py | 363 ---
 ceilometer/tests/unit/api/v2/test_query.py | 240 --
 .../tests/unit/api/v2/test_statistics.py | 105 -
 ceilometer/tests/unit/compute/__init__.py | 0
 .../unit/compute/notifications/__init__.py | 0
 .../compute/notifications/test_instance.py | 608 ----
 .../tests/unit/compute/pollsters/__init__.py | 0
 .../tests/unit/compute/pollsters/base.py | 56 -
 .../tests/unit/compute/pollsters/test_cpu.py | 108 -
 .../unit/compute/pollsters/test_diskio.py | 361 ---
 .../unit/compute/pollsters/test_instance.py | 79 -
 .../pollsters/test_location_metadata.py | 120 -
 .../unit/compute/pollsters/test_memory.py | 115 -
 .../tests/unit/compute/pollsters/test_net.py | 318 --
 .../tests/unit/compute/test_discovery.py | 99 -
 .../tests/unit/compute/virt/__init__.py | 0
 .../unit/compute/virt/hyperv/__init__.py | 0
 .../compute/virt/hyperv/test_inspector.py | 200 --
 .../unit/compute/virt/libvirt/__init__.py | 0
 .../compute/virt/libvirt/test_inspector.py | 374 ---
 .../unit/compute/virt/vmware/__init__.py | 0
 .../compute/virt/vmware/test_inspector.py | 165 -
 .../virt/vmware/test_vsphere_operations.py | 174 --
 .../unit/compute/virt/xenapi/__init__.py | 0
 .../compute/virt/xenapi/test_inspector.py | 187 --
 ceilometer/tests/unit/dispatcher/test_db.py | 79 +-
 .../tests/unit/dispatcher/test_dispatcher.py | 52 -
 ceilometer/tests/unit/dispatcher/test_file.py | 100 -
 .../tests/unit/dispatcher/test_gnocchi.py | 445 ---
 ceilometer/tests/unit/dispatcher/test_http.py | 121 -
 ceilometer/tests/unit/energy/__init__.py | 0
 ceilometer/tests/unit/energy/test_kwapi.py | 135 -
 ceilometer/tests/unit/event/test_converter.py | 781 -----
 ceilometer/tests/unit/event/test_endpoint.py | 200 --
 .../tests/unit/event/test_trait_plugins.py | 115 -
 ceilometer/tests/unit/hardware/__init__.py | 0
 .../tests/unit/hardware/inspector/__init__.py | 0
 .../unit/hardware/inspector/test_inspector.py | 33 -
 .../unit/hardware/inspector/test_snmp.py | 209 --
 .../tests/unit/hardware/pollsters/__init__.py | 0
 .../unit/hardware/pollsters/test_generic.py | 185 --
 .../unit/hardware/pollsters/test_util.py | 61 -
 ceilometer/tests/unit/image/__init__.py | 0
 ceilometer/tests/unit/image/test_glance.py | 227 --
 ceilometer/tests/unit/ipmi/__init__.py | 0
 .../tests/unit/ipmi/notifications/__init__.py | 0
 .../unit/ipmi/notifications/ipmi_test_data.py | 795 -----
 .../unit/ipmi/notifications/test_ironic.py | 214 --
 .../tests/unit/ipmi/platform/__init__.py | 0
 .../tests/unit/ipmi/platform/fake_utils.py | 120 -
 .../unit/ipmi/platform/ipmitool_test_data.py | 383 ---
 .../ipmi/platform/test_intel_node_manager.py | 192 --
 .../unit/ipmi/platform/test_ipmi_sensor.py | 128 -
 .../tests/unit/ipmi/pollsters/__init__.py | 0
 ceilometer/tests/unit/ipmi/pollsters/base.py | 74 -
 .../tests/unit/ipmi/pollsters/test_node.py | 161 -
 .../tests/unit/ipmi/pollsters/test_sensor.py | 145 -
 .../tests/unit/meter/test_meter_plugins.py | 71 -
 .../tests/unit/meter/test_notifications.py | 714 -----
 ceilometer/tests/unit/network/__init__.py | 0
 .../tests/unit/network/services/__init__.py | 0
 .../tests/unit/network/services/test_fwaas.py | 169 -
 .../tests/unit/network/services/test_lbaas.py | 506 ---
 .../unit/network/services/test_lbaas_v2.py | 303 --
 .../unit/network/services/test_vpnaas.py | 176 --
 .../tests/unit/network/statistics/__init__.py | 28 -
 .../statistics/opencontrail/__init__.py | 0
 .../statistics/opencontrail/test_client.py | 71 -
 .../statistics/opencontrail/test_driver.py | 254 --
 .../statistics/opendaylight/__init__.py | 0
 .../statistics/opendaylight/test_client.py | 176 --
 .../statistics/opendaylight/test_driver.py | 1705 ----------
 .../unit/network/statistics/test_driver.py | 37 -
 .../unit/network/statistics/test_flow.py | 56 -
 .../unit/network/statistics/test_port.py | 112 -
 .../network/statistics/test_statistics.py | 185 --
 .../unit/network/statistics/test_switch.py | 28 -
 .../unit/network/statistics/test_table.py | 49 -
 .../tests/unit/network/test_floating_ip.py | 101 -
 .../tests/unit/network/test_notifications.py | 1480 ---------
 ceilometer/tests/unit/objectstore/__init__.py | 0
 ceilometer/tests/unit/objectstore/test_rgw.py | 181 --
 .../tests/unit/objectstore/test_rgw_client.py | 188 --
 .../tests/unit/objectstore/test_swift.py | 216 --
 ceilometer/tests/unit/publisher/__init__.py | 0
 ceilometer/tests/unit/publisher/test_file.py | 117 -
 ceilometer/tests/unit/publisher/test_http.py | 170 -
 .../publisher/test_kafka_broker_publisher.py | 210 --
 .../publisher/test_messaging_publisher.py | 290 --
 ceilometer/tests/unit/publisher/test_udp.py | 174 --
 ceilometer/tests/unit/publisher/test_utils.py | 135 -
 ceilometer/tests/unit/storage/test_base.py | 54 -
 .../tests/unit/storage/test_get_connection.py | 48 +-
 ceilometer/tests/unit/storage/test_models.py | 94 -
 ceilometer/tests/unit/telemetry/__init__.py | 0
 .../unit/telemetry/test_notifications.py | 81 -
 ceilometer/tests/unit/test_coordination.py | 283 --
 ceilometer/tests/unit/test_declarative.py | 48 -
 .../tests/unit/test_decoupled_pipeline.py | 296 --
 ceilometer/tests/unit/test_event_pipeline.py | 410 ---
 ceilometer/tests/unit/test_messaging.py | 65 -
 ceilometer/tests/unit/test_middleware.py | 100 -
 ceilometer/tests/unit/test_neutronclient.py | 186 --
 .../tests/unit/test_neutronclient_lbaas_v2.py | 336 --
 ceilometer/tests/unit/test_novaclient.py | 248 --
 ceilometer/tests/unit/test_sample.py | 68 -
 ceilometer/tests/unit/test_utils.py | 75 -
 ceilometer/tests/unit/transformer/__init__.py | 0
 .../unit/transformer/test_conversions.py | 114 -
 ceilometer/transformer/__init__.py | 77 -
 ceilometer/transformer/accumulator.py | 44 -
 ceilometer/transformer/arithmetic.py | 156 -
 ceilometer/transformer/conversions.py | 340 --
 ceilometer/utils.py | 126 -
 devstack/README.rst | 5 -
 devstack/files/rpms/ceilometer | 1 -
 devstack/plugin.sh | 221 +-
 devstack/settings | 34 -
 devstack/upgrade/settings | 4 +-
 devstack/upgrade/shutdown.sh | 2 +-
 devstack/upgrade/upgrade.sh | 10 +-
 doc/source/1-agents.png | Bin 50041 -> 0 bytes
 doc/source/2-1-collection-notification.png | Bin 33278 -> 0 bytes
 doc/source/2-2-collection-poll.png | Bin 32911 -> 0 bytes
 doc/source/2-accessmodel.png | Bin 44022 -> 0 bytes
 doc/source/3-Pipeline.png | Bin 46678 -> 0 bytes
 doc/source/4-Transformer.png | Bin 42222 -> 0 bytes
 doc/source/5-multi-publish.png | Bin 41915 -> 0 bytes
 doc/source/6-storagemodel.png | Bin 52865 -> 0 bytes
 doc/source/architecture.rst | 243 --
 doc/source/ceilo-arch.png | Bin 94523 -> 0 bytes
 doc/source/ceilo-gnocchi-arch.png | Bin 115795 -> 0 bytes
 doc/source/configuration.rst | 185 --
 doc/source/events.rst | 291 --
 doc/source/glossary.rst | 132 -
 doc/source/gmr.rst | 6 +-
 doc/source/index.rst | 17 +-
 doc/source/install/custom.rst | 165 -
 doc/source/install/dbreco.rst | 89 -
 doc/source/install/development.rst | 18 +-
 doc/source/install/index.rst | 3 -
 doc/source/install/manual.rst | 397 +--
 doc/source/install/upgrade.rst | 114 -
 doc/source/measurements.rst | 35 -
 doc/source/new_meters.rst | 115 -
 doc/source/overview.rst | 49 -
 doc/source/plugins.rst | 177 --
 doc/source/releasenotes/folsom.rst | 61 -
 doc/source/releasenotes/index.rst | 40 -
 doc/source/webapi/v2.rst | 590 ----
 etc/apache2/ceilometer | 39 -
 etc/ceilometer/README-ceilometer.conf.txt | 4 -
 .../ceilometer-config-generator.conf | 3 -
 etc/ceilometer/event_definitions.yaml | 545 ----
 etc/ceilometer/event_pipeline.yaml | 12 -
 .../loadbalancer_v2_meter_definitions.yaml | 265 --
 .../osprofiler_event_definitions.yaml | 31 -
 etc/ceilometer/gnocchi_resources.yaml | 213 --
 etc/ceilometer/pipeline.yaml | 92 -
 etc/ceilometer/rootwrap.conf | 27 -
 etc/ceilometer/rootwrap.d/ipmi.filters | 7 -
 etc/panko/policy.json | 7 +
 rally-jobs/README.rst | 29 -
 rally-jobs/ceilometer.yaml | 69 -
 rally-jobs/extra/README.rst | 6 -
 rally-jobs/extra/fake.img | 0
 rally-jobs/plugins/README.rst | 9 -
 rally-jobs/plugins/plugin_sample.py | 27 -
 releasenotes/notes/.placeholder | 0
 ...-transformer-timeout-e0f42b6c96aa7ada.yaml | 5 -
 .../always-requeue-7a2df9243987ab67.yaml | 15 -
 .../batch-messaging-d126cc525879d58e.yaml | 12 -
 .../cache-json-parsers-888307f3b6b498a2.yaml | 6 -
 ...e-discovery-interval-d19f7c9036a8c186.yaml | 9 -
 ...rable-data-collector-e247aadbffb85243.yaml | 10 -
 .../notes/cors-support-70c33ba1f6825a7b.yaml | 9 -
 .../event-type-race-c295baf7f1661eab.yaml | 5 -
 ...x-agent-coordination-a7103a78fecaec24.yaml | 9 -
 ...regation-transformer-9472aea189fa8f65.yaml | 7 -
 ...-floatingip-pollster-f5172060c626b19e.yaml | 9 -
 ...work-lb-bytes-sample-5dec2c6f3a8ae174.yaml | 6 -
 .../notes/gnocchi-cache-1d8025dfc954f281.yaml | 10 -
 .../notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml | 6 -
 .../gnocchi-client-42cd992075ee53ab.yaml | 7 -
 ...gnocchi-host-metrics-829bcb965d8f2533.yaml | 6 -
 ...nocchi-orchestration-3497c689268df0d1.yaml | 7 -
 ...nocchi-udp-collector-00415e6674b5cc0f.yaml | 5 -
 ...resource-definitions-ad4f69f898ced34d.yaml | 8 -
 ...-events-rbac-support-f216bd7f34b02032.yaml | 11 -
 ...index-events-mongodb-63cb04200b03a093.yaml | 8 -
 .../notes/keystone-v3-fab1e257c5672965.yaml | 4 -
 ...er-def-vol-correctly-0122ae429275f2a6.yaml | 8 -
 ...handle-large-numbers-7c235598ca700f2d.yaml | 7 -
 .../notes/remove-alarms-4df3cdb4f1fb5faa.yaml | 5 -
 .../remove-cadf-http-f8449ced3d2a29d4.yaml | 9 -
 .../remove-eventlet-6738321434b60c78.yaml | 4 -
 ...remove-rpc-collector-d0d0a354140fd107.yaml | 9 -
 ...-duplicate-meter-def-0420164f6a95c50c.yaml | 10 -
 ...l-query-optimisation-ebb2233f7a9b5d06.yaml | 6 -
 .../support-None-query-45abaae45f08eda4.yaml | 5 -
 ...port-lbaasv2-polling-c830dd49bcf25f64.yaml | 15 -
 ...upport-snmp-cpu-util-5c1c7afb713c1acd.yaml | 5 -
 ...t-unique-meter-query-221c6e0c1dc1b726.yaml | 7 -
 ...thread-safe-matching-4a635fc4965c5d4c.yaml | 6 -
 releasenotes/source/_static/.placeholder | 0
 releasenotes/source/_templates/.placeholder | 0
 releasenotes/source/conf.py | 275 --
 releasenotes/source/index.rst | 10 -
 releasenotes/source/liberty.rst | 6 -
 releasenotes/source/mitaka.rst | 6 -
 releasenotes/source/unreleased.rst | 5 -
 requirements.txt | 23 +-
 setup.cfg | 224 --
 test-requirements.txt | 8 -
 tools/ceilometer-test-event.py | 83 -
 tools/make_test_data.py | 229 --
 tools/make_test_data.sh | 77 -
 tools/make_test_event_data.py | 2 +-
 tools/send_test_data.py | 149 -
 tools/show_data.py | 122 -
 tools/test_hbase_table_utils.py | 5 +-
 tox.ini | 12 -
 500 files changed, 101 insertions(+), 69553 deletions(-)
 delete mode 100644 ceilometer/agent/__init__.py
 delete mode 100644 ceilometer/agent/discovery/__init__.py
 delete mode 100644 ceilometer/agent/discovery/endpoint.py
 delete mode 100644 ceilometer/agent/discovery/localnode.py
 delete mode 100644 ceilometer/agent/discovery/tenant.py
 delete mode 100644 ceilometer/agent/manager.py
 delete mode 100644 ceilometer/agent/plugin_base.py
 delete mode 100644 ceilometer/api/controllers/v2/meters.py
 delete mode 100644 ceilometer/api/controllers/v2/query.py
 delete mode 100644 ceilometer/api/controllers/v2/resources.py
 delete mode 100644 ceilometer/api/controllers/v2/samples.py
 delete mode 100644 ceilometer/cmd/agent_notification.py
 delete mode 100644 ceilometer/cmd/collector.py
 delete mode 100644 ceilometer/cmd/polling.py
 delete mode 100644 ceilometer/cmd/sample.py
 delete mode 100644 ceilometer/collector.py
 delete mode 100644 ceilometer/compute/__init__.py
 delete mode 100644 ceilometer/compute/discovery.py
 delete mode 100644 ceilometer/compute/notifications/__init__.py
 delete mode 100644 ceilometer/compute/notifications/instance.py
 delete mode 100644 ceilometer/compute/pollsters/__init__.py
 delete mode 100644 ceilometer/compute/pollsters/cpu.py
 delete mode 100644 ceilometer/compute/pollsters/disk.py
 delete mode 100644 ceilometer/compute/pollsters/instance.py
 delete mode 100644 ceilometer/compute/pollsters/memory.py
 delete mode 100644 ceilometer/compute/pollsters/net.py
 delete mode 100644 ceilometer/compute/pollsters/util.py
 delete mode 100644 ceilometer/compute/util.py
 delete mode 100644 ceilometer/compute/virt/__init__.py
 delete mode 100644 ceilometer/compute/virt/hyperv/__init__.py
 delete mode 100644 ceilometer/compute/virt/hyperv/inspector.py
 delete mode 100644 ceilometer/compute/virt/inspector.py
 delete mode 100644 ceilometer/compute/virt/libvirt/__init__.py
 delete mode 100644 ceilometer/compute/virt/libvirt/inspector.py
 delete mode 100644 ceilometer/compute/virt/vmware/__init__.py
 delete mode 100644 ceilometer/compute/virt/vmware/inspector.py
 delete mode 100644 ceilometer/compute/virt/vmware/vsphere_operations.py
 delete mode 100644 ceilometer/compute/virt/xenapi/__init__.py
 delete mode 100644 ceilometer/compute/virt/xenapi/inspector.py
 delete mode 100644 ceilometer/coordination.py
 delete mode 100644 ceilometer/declarative.py
 delete mode 100644 ceilometer/dispatcher/file.py
 delete mode 100644 ceilometer/dispatcher/gnocchi.py
 delete mode 100644 ceilometer/dispatcher/http.py
 delete mode 100644 ceilometer/energy/__init__.py
 delete mode 100644 ceilometer/energy/kwapi.py
 delete mode 100644 ceilometer/event/converter.py
 delete mode 100644 ceilometer/event/endpoint.py
 delete mode 100644 ceilometer/event/trait_plugins.py
 delete mode 100644 ceilometer/exchange_control.py
 delete mode 100644 ceilometer/hardware/__init__.py
 delete mode 100644 ceilometer/hardware/discovery.py
 delete mode 100644 ceilometer/hardware/inspector/__init__.py
 delete mode 100644 ceilometer/hardware/inspector/base.py
 delete mode 100644 ceilometer/hardware/inspector/snmp.py
 delete mode 100644 ceilometer/hardware/pollsters/__init__.py
 delete mode 100644 ceilometer/hardware/pollsters/data/snmp.yaml
 delete mode 100644 ceilometer/hardware/pollsters/generic.py
 delete mode 100644 ceilometer/hardware/pollsters/util.py
 delete mode 100644 ceilometer/image/__init__.py
 delete mode 100644 ceilometer/image/glance.py
 delete mode 100644 ceilometer/ipmi/__init__.py
 delete mode 100644 ceilometer/ipmi/notifications/__init__.py
 delete mode 100644 ceilometer/ipmi/notifications/ironic.py
 delete mode 100644 ceilometer/ipmi/platform/__init__.py
 delete mode 100644 ceilometer/ipmi/platform/exception.py
 delete mode 100644 ceilometer/ipmi/platform/intel_node_manager.py
 delete mode 100644 ceilometer/ipmi/platform/ipmi_sensor.py
 delete mode 100644 ceilometer/ipmi/platform/ipmitool.py
 delete mode 100644 ceilometer/ipmi/pollsters/__init__.py
 delete mode 100644 ceilometer/ipmi/pollsters/node.py
 delete mode 100644 ceilometer/ipmi/pollsters/sensor.py
 delete mode 100644 ceilometer/keystone_client.py
 delete mode 100644 ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po
 delete mode 100644 ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po
 delete mode 100644 ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po
 delete mode 100644 ceilometer/locale/de/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po
 delete mode 100644 ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po
 delete mode 100644 ceilometer/locale/es/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/fr/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/it/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/ja/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po
 delete mode 100644 ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po
 delete mode 100644 ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po
 delete mode 100644 ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/ru/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po
 delete mode 100644 ceilometer/messaging.py
 delete mode 100644 ceilometer/meter/__init__.py
 delete mode 100644 ceilometer/meter/data/meters.yaml
 delete mode 100644 ceilometer/meter/notifications.py
 delete mode 100644 ceilometer/middleware.py
 delete mode 100644 ceilometer/network/__init__.py
 delete mode 100644 ceilometer/network/floatingip.py
 delete mode 100644 ceilometer/network/notifications.py
 delete mode 100644 ceilometer/network/services/__init__.py
 delete mode 100644 ceilometer/network/services/base.py
 delete mode 100644 ceilometer/network/services/discovery.py
 delete mode 100644 ceilometer/network/services/fwaas.py
 delete mode 100644 ceilometer/network/services/lbaas.py
 delete mode 100644 ceilometer/network/services/vpnaas.py
 delete mode 100644 ceilometer/network/statistics/__init__.py
 delete mode 100644 ceilometer/network/statistics/driver.py
 delete mode 100644 ceilometer/network/statistics/flow.py
 delete mode 100644 ceilometer/network/statistics/opencontrail/__init__.py
 delete mode 100644 ceilometer/network/statistics/opencontrail/client.py
 delete mode 100644 ceilometer/network/statistics/opencontrail/driver.py
 delete mode 100644 ceilometer/network/statistics/opendaylight/__init__.py
 delete mode 100644 ceilometer/network/statistics/opendaylight/client.py
 delete mode 100644 ceilometer/network/statistics/opendaylight/driver.py
 delete mode 100644 ceilometer/network/statistics/port.py
 delete mode 100644 ceilometer/network/statistics/switch.py
 delete mode 100644 ceilometer/network/statistics/table.py
 delete mode 100644 ceilometer/neutron_client.py
 delete mode 100644 ceilometer/notification.py
 delete mode 100644 ceilometer/nova_client.py
 delete mode 100644 ceilometer/objectstore/__init__.py
 delete mode 100644 ceilometer/objectstore/rgw.py
 delete mode 100644 ceilometer/objectstore/rgw_client.py
 delete mode 100644 ceilometer/objectstore/swift.py
 delete mode 100644 ceilometer/pipeline.py
 delete mode 100644 ceilometer/publisher/direct.py
 delete mode 100644 ceilometer/publisher/file.py
 delete mode 100644 ceilometer/publisher/http.py
 delete mode 100644 ceilometer/publisher/kafka_broker.py
 delete mode 100644 ceilometer/publisher/messaging.py
 delete mode 100644 ceilometer/publisher/test.py
 delete mode 100644 ceilometer/publisher/udp.py
 delete mode 100644 ceilometer/publisher/utils.py
 delete mode 100644 ceilometer/sample.py
 delete mode 100644 ceilometer/service_base.py
 delete mode 100644 ceilometer/storage/hbase/migration.py
 delete mode 100644 ceilometer/storage/impl_hbase.py
 delete mode 100644 ceilometer/storage/impl_log.py
 delete mode 100644 ceilometer/storage/impl_mongodb.py
 delete mode 100644 ceilometer/storage/impl_sqlalchemy.py
 delete mode 100644 ceilometer/storage/models.py
 delete mode 100644 ceilometer/storage/pymongo_base.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/README
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/__init__.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/manage.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py
 delete mode 100644 ceilometer/storage/sqlalchemy/migration.py
 delete mode 100644 ceilometer/storage/sqlalchemy/utils.py
 delete mode 100644 ceilometer/telemetry/__init__.py
 delete mode 100644 ceilometer/telemetry/notifications.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_api_upgrade.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
 delete mode 100644 ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/meters.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits/samples.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml
 delete mode 100644 ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml
 delete mode 100644 ceilometer/tests/functional/publisher/test_direct.py
 delete mode 100644 ceilometer/tests/functional/storage/test_impl_hbase.py
 delete mode 100644 ceilometer/tests/functional/storage/test_impl_log.py
 delete mode 100644 ceilometer/tests/functional/storage/test_pymongo_base.py
 delete mode 100644 ceilometer/tests/functional/test_collector.py
 delete mode 100644 ceilometer/tests/functional/test_notification.py
 delete mode 100644 ceilometer/tests/integration/__init__.py
 delete mode 100644 ceilometer/tests/integration/gabbi/__init__.py
 delete mode 100644 ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml
 delete mode 100644 ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json
 delete mode 100644 ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json
 delete mode 100644 ceilometer/tests/integration/gabbi/test_gabbi_live.py
 delete mode 100755 ceilometer/tests/integration/hooks/post_test_hook.sh
 delete mode 100644 ceilometer/tests/pipeline_base.py
 delete mode 100644 ceilometer/tests/tempest/__init__.py
 delete mode 100644 ceilometer/tests/tempest/api/__init__.py
 delete mode 100644 ceilometer/tests/tempest/api/base.py
 delete mode 100644 ceilometer/tests/tempest/api/test_telemetry_notification_api.py
 delete mode 100644 ceilometer/tests/tempest/config.py
 delete mode 100644 ceilometer/tests/tempest/plugin.py
 delete mode 100644 ceilometer/tests/tempest/scenario/__init__.py
 delete mode 100644 ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py
 delete mode 100644 ceilometer/tests/tempest/service/__init__.py
 delete mode 100644 ceilometer/tests/tempest/service/client.py
 delete mode 100644 ceilometer/tests/unit/agent/__init__.py
 delete mode 100644 ceilometer/tests/unit/agent/agentbase.py
 delete mode 100644 ceilometer/tests/unit/agent/test_discovery.py
 delete mode 100644 ceilometer/tests/unit/agent/test_manager.py
 delete mode 100644 ceilometer/tests/unit/agent/test_plugin.py
 delete mode 100644 ceilometer/tests/unit/api/test_hooks.py
 delete mode 100644 ceilometer/tests/unit/api/v2/test_complex_query.py
 delete mode 100644 ceilometer/tests/unit/api/v2/test_statistics.py
 delete mode 100644 ceilometer/tests/unit/compute/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/notifications/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/notifications/test_instance.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/base.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_cpu.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_diskio.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_instance.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_location_metadata.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_memory.py
 delete mode 100644 ceilometer/tests/unit/compute/pollsters/test_net.py
 delete mode 100644 ceilometer/tests/unit/compute/test_discovery.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/hyperv/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/libvirt/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/vmware/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/vmware/test_inspector.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/xenapi/__init__.py
 delete mode 100644 ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py
 delete mode 100644 ceilometer/tests/unit/dispatcher/test_dispatcher.py
 delete mode 100644 ceilometer/tests/unit/dispatcher/test_file.py
 delete mode 100644 ceilometer/tests/unit/dispatcher/test_gnocchi.py
 delete mode 100644 ceilometer/tests/unit/dispatcher/test_http.py
 delete mode 100644 ceilometer/tests/unit/energy/__init__.py
 delete mode 100644 ceilometer/tests/unit/energy/test_kwapi.py
 delete mode 100644 ceilometer/tests/unit/event/test_converter.py
 delete mode 100644 ceilometer/tests/unit/event/test_endpoint.py
 delete mode 100644 ceilometer/tests/unit/event/test_trait_plugins.py
 delete mode 100644 ceilometer/tests/unit/hardware/__init__.py
 delete mode 100644 ceilometer/tests/unit/hardware/inspector/__init__.py
 delete mode 100644 ceilometer/tests/unit/hardware/inspector/test_inspector.py
 delete mode 100644 ceilometer/tests/unit/hardware/inspector/test_snmp.py
 delete mode 100644 ceilometer/tests/unit/hardware/pollsters/__init__.py
 delete mode 100644 ceilometer/tests/unit/hardware/pollsters/test_generic.py
 delete mode 100644 ceilometer/tests/unit/hardware/pollsters/test_util.py
 delete mode 100644 ceilometer/tests/unit/image/__init__.py
 delete mode 100644 ceilometer/tests/unit/image/test_glance.py
 delete mode 100644 ceilometer/tests/unit/ipmi/__init__.py
 delete mode 100644 ceilometer/tests/unit/ipmi/notifications/__init__.py
 delete mode 100644 ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py
 delete mode 100644 ceilometer/tests/unit/ipmi/notifications/test_ironic.py
 delete mode 100644 ceilometer/tests/unit/ipmi/platform/__init__.py
 delete mode 100644 ceilometer/tests/unit/ipmi/platform/fake_utils.py
 delete mode 100644 ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py
 delete mode 100644 ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py
 delete mode 100644 ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py
 delete mode 100644 ceilometer/tests/unit/ipmi/pollsters/__init__.py
 delete mode 100644 ceilometer/tests/unit/ipmi/pollsters/base.py
 delete mode 100644 ceilometer/tests/unit/ipmi/pollsters/test_node.py
 delete mode 100644 ceilometer/tests/unit/ipmi/pollsters/test_sensor.py
 delete mode 100644 ceilometer/tests/unit/meter/test_meter_plugins.py
 delete mode 100644 ceilometer/tests/unit/meter/test_notifications.py
 delete mode 100644 ceilometer/tests/unit/network/__init__.py
 delete mode 100644 ceilometer/tests/unit/network/services/__init__.py
 delete mode 100644 ceilometer/tests/unit/network/services/test_fwaas.py
 delete mode 100644 ceilometer/tests/unit/network/services/test_lbaas.py
 delete mode 100644 ceilometer/tests/unit/network/services/test_lbaas_v2.py
 delete mode 100644 ceilometer/tests/unit/network/services/test_vpnaas.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/__init__.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opencontrail/__init__.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opencontrail/test_client.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opendaylight/__init__.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opendaylight/test_client.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_driver.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_flow.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_port.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_statistics.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_switch.py
 delete mode 100644 ceilometer/tests/unit/network/statistics/test_table.py
 delete mode 100644 ceilometer/tests/unit/network/test_floating_ip.py
 delete mode 100644 ceilometer/tests/unit/network/test_notifications.py
 delete mode 100644 ceilometer/tests/unit/objectstore/__init__.py
 delete mode 100644 ceilometer/tests/unit/objectstore/test_rgw.py
 delete mode 100644 ceilometer/tests/unit/objectstore/test_rgw_client.py
 delete mode 100644 ceilometer/tests/unit/objectstore/test_swift.py
 delete mode 100644 ceilometer/tests/unit/publisher/__init__.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_file.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_http.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_messaging_publisher.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_udp.py
 delete mode 100644 ceilometer/tests/unit/publisher/test_utils.py
 delete mode 100644 ceilometer/tests/unit/storage/test_base.py
 delete mode 100644 ceilometer/tests/unit/storage/test_models.py
 delete mode 100644 ceilometer/tests/unit/telemetry/__init__.py
 delete mode 100644 ceilometer/tests/unit/telemetry/test_notifications.py
 delete mode 100644 ceilometer/tests/unit/test_coordination.py
 delete mode 100644 ceilometer/tests/unit/test_declarative.py
 delete mode 100644 ceilometer/tests/unit/test_decoupled_pipeline.py
 delete mode 100644 ceilometer/tests/unit/test_event_pipeline.py
 delete mode 100644 ceilometer/tests/unit/test_messaging.py
 delete mode 100644 ceilometer/tests/unit/test_middleware.py
 delete mode 100644 ceilometer/tests/unit/test_neutronclient.py
 delete mode 100644 ceilometer/tests/unit/test_neutronclient_lbaas_v2.py
 delete mode 100644 ceilometer/tests/unit/test_novaclient.py
 delete mode 100644 ceilometer/tests/unit/test_sample.py
 delete mode 100644 ceilometer/tests/unit/transformer/__init__.py
 delete mode 100644 ceilometer/tests/unit/transformer/test_conversions.py
 delete mode 100644 ceilometer/transformer/__init__.py
 delete mode 100644 ceilometer/transformer/accumulator.py
 delete mode 100644 ceilometer/transformer/arithmetic.py
 delete mode 100644 ceilometer/transformer/conversions.py
 delete mode 100644 devstack/files/rpms/ceilometer
 delete mode 100644 doc/source/1-agents.png
 delete mode 100644 doc/source/2-1-collection-notification.png
 delete mode 100644 doc/source/2-2-collection-poll.png
 delete mode 100644 doc/source/2-accessmodel.png
 delete mode 100644 doc/source/3-Pipeline.png
 delete mode 100644 doc/source/4-Transformer.png
 delete mode 100644 doc/source/5-multi-publish.png
 delete mode 100644 doc/source/6-storagemodel.png
 delete mode 100644 doc/source/architecture.rst
 delete mode 100644 doc/source/ceilo-arch.png
 delete mode 100644 doc/source/ceilo-gnocchi-arch.png
 delete mode 100644 doc/source/configuration.rst
 delete mode 100644 doc/source/events.rst
 delete mode 100644 doc/source/glossary.rst
 delete mode 100644 doc/source/install/custom.rst
 delete mode 100644 doc/source/install/dbreco.rst
 delete mode 100644 doc/source/install/upgrade.rst
 delete mode 100644 doc/source/measurements.rst
 delete mode 100644 doc/source/new_meters.rst
 delete mode 100644 doc/source/overview.rst
 delete mode 100644 doc/source/plugins.rst
 delete mode 100644 doc/source/releasenotes/folsom.rst
 delete mode 100644 doc/source/releasenotes/index.rst
 delete mode 100644 etc/apache2/ceilometer
 delete mode 100644 etc/ceilometer/README-ceilometer.conf.txt
 delete mode 100644 etc/ceilometer/event_definitions.yaml
 delete mode 100644 etc/ceilometer/event_pipeline.yaml
 delete mode 100644 etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml
 delete mode 100644 etc/ceilometer/examples/osprofiler_event_definitions.yaml
 delete mode 100644 etc/ceilometer/gnocchi_resources.yaml
 delete mode 100644 etc/ceilometer/pipeline.yaml
 delete mode 100644 etc/ceilometer/rootwrap.conf
 delete mode 100644 etc/ceilometer/rootwrap.d/ipmi.filters
 create mode 100644 etc/panko/policy.json
 delete mode 100644 rally-jobs/README.rst
 delete mode 100644 rally-jobs/ceilometer.yaml
 delete mode 100644 rally-jobs/extra/README.rst
 delete mode 100644 rally-jobs/extra/fake.img
 delete mode 100644 rally-jobs/plugins/README.rst
 delete mode 100644 rally-jobs/plugins/plugin_sample.py
 delete mode 100644 releasenotes/notes/.placeholder
 delete mode 100644 releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml
 delete mode 100644 releasenotes/notes/always-requeue-7a2df9243987ab67.yaml
 delete mode 100644 releasenotes/notes/batch-messaging-d126cc525879d58e.yaml
 delete mode 100644 releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml
 delete mode 100644 releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml
 delete mode 100644 releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml
 delete mode 100644 releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml
 delete mode 100644 releasenotes/notes/event-type-race-c295baf7f1661eab.yaml
 delete mode 100644 releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml
 delete mode 100644 releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml
 delete mode 100644 releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml
 delete mode 100644 releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml
 delete mode 100644 releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml
 delete mode 100644 releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml
 delete mode 100644 releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml
 delete mode 100644 releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml
 delete mode 100644 releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml
 delete mode 100644 releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml
 delete mode 100644 releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml
 delete mode 100644 releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml
 delete mode 100644 releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml
 delete mode 100644 releasenotes/notes/keystone-v3-fab1e257c5672965.yaml
 delete mode 100644 releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml
 delete mode 100644 releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml
 delete mode 100644 releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml
 delete mode 100644 releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml
 delete mode 100644 releasenotes/notes/remove-eventlet-6738321434b60c78.yaml
 delete mode 100644 releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml
 delete mode 100644 releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml
 delete mode 100644 releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml
 delete mode 100644 releasenotes/notes/support-None-query-45abaae45f08eda4.yaml
 delete mode 100644 releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml
 delete mode 100644 releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml
 delete mode 100644 releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml
 delete mode 100644 releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml
 delete mode 100644 releasenotes/source/_static/.placeholder
 delete mode 100644 releasenotes/source/_templates/.placeholder
 delete mode 100644 releasenotes/source/conf.py
 delete mode 100644 releasenotes/source/index.rst
 delete mode 100644 releasenotes/source/liberty.rst
 delete mode 100644 releasenotes/source/mitaka.rst
 delete mode 100644 releasenotes/source/unreleased.rst
 delete mode 100755 tools/ceilometer-test-event.py
 delete mode 100755 tools/make_test_data.py
 delete mode 100755 tools/make_test_data.sh
 delete mode 100755 tools/send_test_data.py
 delete mode 100755 tools/show_data.py

diff --git a/.gitignore b/.gitignore
index 83cf18b2..32e8f748 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,3 @@ doc/build
 doc/source/api/
 etc/ceilometer/ceilometer.conf
 subunit.log
-
-# Files created by releasenotes build
-releasenotes/build
diff --git a/README.rst b/README.rst
index 8cd4f6b2..b7314705 100644
--- a/README.rst
+++ b/README.rst
@@ -1,9 +1,6 @@
 ceilometer
 ==========
 
-Release notes can be read online at:
-   http://docs.openstack.org/developer/ceilometer/releasenotes/index.html
-
 Documentation for the project can be found at:
 http://docs.openstack.org/developer/ceilometer/
 
diff --git a/ceilometer/agent/__init__.py b/ceilometer/agent/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/agent/discovery/__init__.py b/ceilometer/agent/discovery/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/agent/discovery/endpoint.py b/ceilometer/agent/discovery/endpoint.py
deleted file mode 100644
index bb177659..00000000
--- a/ceilometer/agent/discovery/endpoint.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2014-2015 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-
-from ceilometer.agent import plugin_base as plugin
-from ceilometer.i18n import _LW
-from ceilometer import keystone_client
-
-LOG = log.getLogger(__name__)
-
-cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client')
-
-
-class EndpointDiscovery(plugin.DiscoveryBase):
-    """Discovery that supplies service endpoints.
-
-    This discovery should be used when the relevant APIs are not well suited
-    to dividing the pollster's work into smaller pieces than a whole service
-    at once.
-    """
-
-    @staticmethod
-    def discover(manager, param=None):
-        endpoints = keystone_client.get_service_catalog(
-            manager.keystone).get_urls(
-                service_type=param,
-                interface=cfg.CONF.service_credentials.interface,
-                region_name=cfg.CONF.service_credentials.region_name)
-        if not endpoints:
-            LOG.warning(_LW('No endpoints found for service %s'),
-                        "<all services>" if param is None else param)
-            return []
-        return endpoints
diff --git a/ceilometer/agent/discovery/localnode.py b/ceilometer/agent/discovery/localnode.py
deleted file mode 100644
index 1de479f3..00000000
--- a/ceilometer/agent/discovery/localnode.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Intel
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ceilometer.agent import plugin_base
-
-
-class LocalNodeDiscovery(plugin_base.DiscoveryBase):
-    def discover(self, manager, param=None):
-        """Return local node as resource."""
-        return ['local_host']
diff --git a/ceilometer/agent/discovery/tenant.py b/ceilometer/agent/discovery/tenant.py
deleted file mode 100644
index 8ae84688..00000000
--- a/ceilometer/agent/discovery/tenant.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2014 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-from ceilometer.agent import plugin_base as plugin
-
-cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client')
-
-
-class TenantDiscovery(plugin.DiscoveryBase):
-    """Discovery that supplies keystone tenants.
-
-    This discovery should be used when the pollster's work can't be divided
-    into smaller pieces than per-tenants. Example of this is the Swift
-    pollster, which polls account details and does so per-project.
-    """
-
-    def discover(self, manager, param=None):
-        tenants = manager.keystone.projects.list()
-        return tenants or []
diff --git a/ceilometer/agent/manager.py b/ceilometer/agent/manager.py
deleted file mode 100644
index b0784c25..00000000
--- a/ceilometer/agent/manager.py
+++ /dev/null
@@ -1,509 +0,0 @@
-#
-# Copyright 2013 Julien Danjou
-# Copyright 2014 Red Hat, Inc
-#
-# Authors: Julien Danjou
-#          Eoghan Glynn
-#          Nejc Saje
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import itertools
-import random
-
-from keystoneauth1 import exceptions as ka_exceptions
-from keystoneclient import exceptions as ks_exceptions
-from oslo_config import cfg
-from oslo_log import log
-import oslo_messaging
-from oslo_utils import fnmatch
-from oslo_utils import timeutils
-from six import moves
-from six.moves.urllib import parse as urlparse
-from stevedore import extension
-
-from ceilometer.agent import plugin_base
-from ceilometer import coordination
-from ceilometer.i18n import _, _LE, _LI, _LW
-from ceilometer import keystone_client
-from ceilometer import messaging
-from ceilometer import pipeline
-from ceilometer.publisher import utils as publisher_utils
-from ceilometer import service_base
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-OPTS = [
-    cfg.BoolOpt('batch_polled_samples',
-                default=True,
-                help='To reduce polling agent load, samples are sent to the '
-                     'notification agent in a batch. To gain higher '
To gain higher ' - 'throughput at the cost of load set this to False.'), - cfg.IntOpt('shuffle_time_before_polling_task', - default=0, - help='To reduce large requests at same time to Nova or other ' - 'components from different compute agents, shuffle ' - 'start time of polling task.'), -] - -POLLING_OPTS = [ - cfg.StrOpt('partitioning_group_prefix', - deprecated_group='central', - help='Work-load partitioning group prefix. Use only if you ' - 'want to run multiple polling agents with different ' - 'config files. For each sub-group of the agent ' - 'pool with the same partitioning_group_prefix a disjoint ' - 'subset of pollsters should be loaded.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(POLLING_OPTS, group='polling') -cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_group('service_types', 'ceilometer.energy.kwapi') -cfg.CONF.import_group('service_types', 'ceilometer.image.glance') -cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') -cfg.CONF.import_group('service_types', 'ceilometer.nova_client') -cfg.CONF.import_group('service_types', 'ceilometer.objectstore.rgw') -cfg.CONF.import_group('service_types', 'ceilometer.objectstore.swift') - - -class PollsterListForbidden(Exception): - def __init__(self): - msg = ('It is forbidden to use pollster-list option of polling agent ' - 'in case of using coordination between multiple agents. Please ' - 'use either multiple agents being coordinated or polling list ' - 'option for one polling agent.') - super(PollsterListForbidden, self).__init__(msg) - - -class EmptyPollstersList(Exception): - def __init__(self): - msg = ('No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.') - super(EmptyPollstersList, self).__init__(msg) - - -class Resources(object): - def __init__(self, agent_manager): - self.agent_manager = agent_manager - self._resources = [] - self._discovery = [] - self.blacklist = [] - - def setup(self, source): - self._resources = source.resources - self._discovery = source.discovery - - def get(self, discovery_cache=None): - source_discovery = (self.agent_manager.discover(self._discovery, - discovery_cache) - if self._discovery else []) - static_resources = [] - if self._resources: - static_resources_group = self.agent_manager.construct_group_id( - utils.hash_of_set(self._resources)) - p_coord = self.agent_manager.partition_coordinator - static_resources = p_coord.extract_my_subset( - static_resources_group, self._resources) - return static_resources + source_discovery - - @staticmethod - def key(source_name, pollster): - return '%s-%s' % (source_name, pollster.name) - - -class PollingTask(object): - """Polling task for polling samples and notifying. - - A polling task can be invoked periodically or only once. 
- """ - - def __init__(self, agent_manager): - self.manager = agent_manager - - # elements of the Cartesian product of sources X pollsters - # with a common interval - self.pollster_matches = collections.defaultdict(set) - - # we relate the static resources and per-source discovery to - # each combination of pollster and matching source - resource_factory = lambda: Resources(agent_manager) - self.resources = collections.defaultdict(resource_factory) - - self._batch = cfg.CONF.batch_polled_samples - self._telemetry_secret = cfg.CONF.publisher.telemetry_secret - - def add(self, pollster, source): - self.pollster_matches[source.name].add(pollster) - key = Resources.key(source.name, pollster) - self.resources[key].setup(source) - - def poll_and_notify(self): - """Polling sample and notify.""" - cache = {} - discovery_cache = {} - poll_history = {} - for source_name in self.pollster_matches: - for pollster in self.pollster_matches[source_name]: - key = Resources.key(source_name, pollster) - candidate_res = list( - self.resources[key].get(discovery_cache)) - if not candidate_res and pollster.obj.default_discovery: - candidate_res = self.manager.discover( - [pollster.obj.default_discovery], discovery_cache) - - # Remove duplicated resources and black resources. Using - # set() requires well defined __hash__ for each resource. - # Since __eq__ is defined, 'not in' is safe here. - polling_resources = [] - black_res = self.resources[key].blacklist - history = poll_history.get(pollster.name, []) - for x in candidate_res: - if x not in history: - history.append(x) - if x not in black_res: - polling_resources.append(x) - poll_history[pollster.name] = history - - # If no resources, skip for this pollster - if not polling_resources: - p_context = 'new ' if history else '' - LOG.info(_LI("Skip pollster %(name)s, no %(p_context)s" - "resources found this cycle"), - {'name': pollster.name, 'p_context': p_context}) - continue - - LOG.info(_LI("Polling pollster %(poll)s in the context of " - "%(src)s"), - dict(poll=pollster.name, src=source_name)) - try: - polling_timestamp = timeutils.utcnow().isoformat() - samples = pollster.obj.get_samples( - manager=self.manager, - cache=cache, - resources=polling_resources - ) - sample_batch = [] - - for sample in samples: - # Note(yuywz): Unify the timestamp of polled samples - sample.set_timestamp(polling_timestamp) - sample_dict = ( - publisher_utils.meter_message_from_counter( - sample, self._telemetry_secret - )) - if self._batch: - sample_batch.append(sample_dict) - else: - self._send_notification([sample_dict]) - - if sample_batch: - self._send_notification(sample_batch) - - except plugin_base.PollsterPermanentError as err: - LOG.error(_( - 'Prevent pollster %(name)s for ' - 'polling source %(source)s anymore!') - % ({'name': pollster.name, 'source': source_name})) - self.resources[key].blacklist.extend(err.fail_res_list) - except Exception as err: - LOG.warning(_( - 'Continue after error from %(name)s: %(error)s') - % ({'name': pollster.name, 'error': err}), - exc_info=True) - - def _send_notification(self, samples): - self.manager.notifier.sample( - {}, - 'telemetry.polling', - {'samples': samples} - ) - - -class AgentManager(service_base.PipelineBasedService): - - def __init__(self, namespaces=None, pollster_list=None): - namespaces = namespaces or ['compute', 'central'] - pollster_list = pollster_list or [] - group_prefix = cfg.CONF.polling.partitioning_group_prefix - - # features of using coordination and pollster-list are exclusive, and - # cannot be used at 
the same time, to avoid both sample duplication and - # sample loss - if pollster_list and cfg.CONF.coordination.backend_url: - raise PollsterListForbidden() - - super(AgentManager, self).__init__() - - def _match(pollster): - """Find out if the pollster name matches one of the list's patterns.""" - return any(fnmatch.fnmatch(pollster.name, pattern) for - pattern in pollster_list) - - if type(namespaces) is not list: - namespaces = [namespaces] - - # we'll have the default ['compute', 'central'] here if no namespaces - # were passed - extensions = (self._extensions('poll', namespace).extensions - for namespace in namespaces) - # get the extensions from the pollster builder - extensions_fb = (self._extensions_from_builder('poll', namespace) - for namespace in namespaces) - if pollster_list: - extensions = (moves.filter(_match, exts) - for exts in extensions) - extensions_fb = (moves.filter(_match, exts) - for exts in extensions_fb) - - self.extensions = list(itertools.chain(*list(extensions))) + list( - itertools.chain(*list(extensions_fb))) - - if self.extensions == []: - raise EmptyPollstersList() - - self.discovery_manager = self._extensions('discover') - self.partition_coordinator = coordination.PartitionCoordinator() - - # Compose the coordination group prefix. - # We'll use the namespaces as the basis for this partitioning. - namespace_prefix = '-'.join(sorted(namespaces)) - self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix) - if group_prefix else namespace_prefix) - - self.notifier = oslo_messaging.Notifier( - messaging.get_transport(), - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id="ceilometer.polling") - - self._keystone = None - self._keystone_last_exception = None - - @staticmethod - def _get_ext_mgr(namespace): - def _catch_extension_load_error(mgr, ep, exc): - # Extensions raising ExtensionLoadError can be ignored; also - # ignore anything we can't import, as a safety measure. - if isinstance(exc, plugin_base.ExtensionLoadError): - LOG.exception(_("Skip loading extension for %s") % ep.name) - return - if isinstance(exc, ImportError): - LOG.error(_("Failed to import extension for %(name)s: " - "%(error)s"), - {'name': ep.name, 'error': exc}) - return - raise exc - - return extension.ExtensionManager( - namespace=namespace, - invoke_on_load=True, - on_load_failure_callback=_catch_extension_load_error, - ) - - def _extensions(self, category, agent_ns=None): - namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns - else 'ceilometer.%s' % category) - return self._get_ext_mgr(namespace) - - def _extensions_from_builder(self, category, agent_ns=None): - ns = ('ceilometer.builder.%s.%s' % (category, agent_ns) if agent_ns - else 'ceilometer.builder.%s' % category) - mgr = self._get_ext_mgr(ns) - - def _build(ext): - return ext.plugin.get_pollsters_extensions() - - # NOTE: this seems to be a stevedore bug: if no extensions are found, - # map will raise a RuntimeError, which is not documented.
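For context on the NOTE above: stevedore's map() raises an undocumented RuntimeError when a namespace yields no extensions, which is why the guard that follows checks names() first. A minimal standalone sketch of that guard, assuming stevedore is installed and using a deliberately empty, hypothetical namespace:

    from stevedore import extension

    # 'example.empty' is a hypothetical namespace with no registered plugins.
    mgr = extension.ExtensionManager(namespace='example.empty')

    # Calling mgr.map() directly here would raise the undocumented
    # RuntimeError; guarding on names() avoids it.
    results = mgr.map(lambda ext: ext.name) if mgr.names() else []
    print(results)  # []
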
- if mgr.names(): - return list(itertools.chain(*mgr.map(_build))) - else: - return [] - - def join_partitioning_groups(self): - self.groups = set([self.construct_group_id(d.obj.group_id) - for d in self.discovery_manager]) - # let each set of statically-defined resources have its own group - static_resource_groups = set([ - self.construct_group_id(utils.hash_of_set(p.resources)) - for p in self.polling_manager.sources - if p.resources - ]) - self.groups.update(static_resource_groups) - for group in self.groups: - self.partition_coordinator.join_group(group) - - def create_polling_task(self): - """Create an initially empty polling task.""" - return PollingTask(self) - - def setup_polling_tasks(self): - polling_tasks = {} - for source in self.polling_manager.sources: - polling_task = None - for pollster in self.extensions: - if source.support_meter(pollster.name): - polling_task = polling_tasks.get(source.get_interval()) - if not polling_task: - polling_task = self.create_polling_task() - polling_tasks[source.get_interval()] = polling_task - polling_task.add(pollster, source) - return polling_tasks - - def construct_group_id(self, discovery_group_id): - return ('%s-%s' % (self.group_prefix, - discovery_group_id) - if discovery_group_id else None) - - def configure_polling_tasks(self): - # allow time for coordination if necessary - delay_start = self.partition_coordinator.is_active() - - # set shuffle time before polling task if necessary - delay_polling_time = random.randint( - 0, cfg.CONF.shuffle_time_before_polling_task) - - pollster_timers = [] - data = self.setup_polling_tasks() - for interval, polling_task in data.items(): - delay_time = (interval + delay_polling_time if delay_start - else delay_polling_time) - pollster_timers.append(self.tg.add_timer(interval, - self.interval_task, - initial_delay=delay_time, - task=polling_task)) - self.tg.add_timer(cfg.CONF.coordination.heartbeat, - self.partition_coordinator.heartbeat) - - return pollster_timers - - def start(self): - super(AgentManager, self).start() - self.polling_manager = pipeline.setup_polling() - - self.partition_coordinator.start() - self.join_partitioning_groups() - - self.pollster_timers = self.configure_polling_tasks() - - self.init_pipeline_refresh() - - def stop(self): - if self.started: - self.partition_coordinator.stop() - super(AgentManager, self).stop() - - def interval_task(self, task): - # NOTE(sileht): remove the previous keystone client - # and exception to get a new one in this polling cycle. 
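The NOTE above describes a deliberate one-client-per-polling-cycle cache. A minimal sketch of the same pattern in isolation, where make_client stands in for keystone_client.get_client() and a plain Exception stands in for the keystone client errors the real code catches:

    class ClientCache(object):
        """One client per polling cycle, with any failure remembered."""

        def __init__(self, make_client):
            self._make_client = make_client
            self._client = None
            self._last_exception = None

        def reset(self):
            # Called at the start of each cycle, as interval_task does above.
            self._client = self._last_exception = None

        @property
        def client(self):
            # Lazy: the client is only built if some plugin asks for it,
            # and at most once per cycle even when creation fails.
            if self._client is None and self._last_exception is None:
                try:
                    self._client = self._make_client()
                except Exception as e:
                    self._last_exception = e
            if self._client is not None:
                return self._client
            raise self._last_exception
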
- self._keystone = None - self._keystone_last_exception = None - - task.poll_and_notify() - - @property - def keystone(self): - # NOTE(sileht): we do lazy loading of the keystone client - # for multiple reasons: - # * don't use it if no plugin need it - # * use only one client for all plugins per polling cycle - if self._keystone is None and self._keystone_last_exception is None: - try: - self._keystone = keystone_client.get_client() - self._keystone_last_exception = None - except (ka_exceptions.ClientException, - ks_exceptions.ClientException) as e: - self._keystone = None - self._keystone_last_exception = e - if self._keystone is not None: - return self._keystone - else: - raise self._keystone_last_exception - - @staticmethod - def _parse_discoverer(url): - s = urlparse.urlparse(url) - return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None) - - def _discoverer(self, name): - for d in self.discovery_manager: - if d.name == name: - return d.obj - return None - - def discover(self, discovery=None, discovery_cache=None): - resources = [] - discovery = discovery or [] - for url in discovery: - if discovery_cache is not None and url in discovery_cache: - resources.extend(discovery_cache[url]) - continue - name, param = self._parse_discoverer(url) - discoverer = self._discoverer(name) - if discoverer: - try: - if discoverer.KEYSTONE_REQUIRED_FOR_SERVICE: - service_type = getattr( - cfg.CONF.service_types, - discoverer.KEYSTONE_REQUIRED_FOR_SERVICE) - if not keystone_client.get_service_catalog( - self.keystone).get_endpoints( - service_type=service_type): - LOG.warning(_LW( - 'Skipping %(name)s, %(service_type)s service ' - 'is not registered in keystone'), - {'name': name, 'service_type': service_type}) - continue - - discovered = discoverer.discover(self, param) - partitioned = self.partition_coordinator.extract_my_subset( - self.construct_group_id(discoverer.group_id), - discovered) - resources.extend(partitioned) - if discovery_cache is not None: - discovery_cache[url] = partitioned - except (ka_exceptions.ClientException, - ks_exceptions.ClientException) as e: - LOG.error(_LE('Skipping %(name)s, keystone issue: ' - '%(exc)s'), {'name': name, 'exc': e}) - except Exception as err: - LOG.exception(_('Unable to discover resources: %s') % err) - else: - LOG.warning(_('Unknown discovery extension: %s') % name) - return resources - - def stop_pollsters(self): - for x in self.pollster_timers: - try: - x.stop() - self.tg.timer_done(x) - except Exception: - LOG.error(_('Error stopping pollster.'), exc_info=True) - self.pollster_timers = [] - - def reload_pipeline(self): - if self.pipeline_validated: - LOG.info(_LI("Reconfiguring polling tasks.")) - - # stop existing pollsters and leave partitioning groups - self.stop_pollsters() - for group in self.groups: - self.partition_coordinator.leave_group(group) - - # re-create partitioning groups according to pipeline - # and configure polling tasks with latest pipeline conf - self.join_partitioning_groups() - self.pollster_timers = self.configure_polling_tasks() diff --git a/ceilometer/agent/plugin_base.py b/ceilometer/agent/plugin_base.py deleted file mode 100644 index e47db093..00000000 --- a/ceilometer/agent/plugin_base.py +++ /dev/null @@ -1,270 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Base class for plugins. -""" - -import abc -import collections - -from oslo_log import log -import oslo_messaging -import six -from stevedore import extension - -from ceilometer.i18n import _LE -from ceilometer import messaging - -LOG = log.getLogger(__name__) - -ExchangeTopics = collections.namedtuple('ExchangeTopics', - ['exchange', 'topics']) - - -class PluginBase(object): - """Base class for all plugins.""" - - -@six.add_metaclass(abc.ABCMeta) -class NotificationBase(PluginBase): - """Base class for plugins that support the notification API.""" - def __init__(self, manager): - super(NotificationBase, self).__init__() - # NOTE(gordc): this is filter rule used by oslo.messaging to dispatch - # messages to an endpoint. - if self.event_types: - self.filter_rule = oslo_messaging.NotificationFilter( - event_type='|'.join(self.event_types)) - self.manager = manager - - @staticmethod - def get_notification_topics(conf): - if 'notification_topics' in conf: - return conf.notification_topics - return conf.oslo_messaging_notifications.topics - - @abc.abstractproperty - def event_types(self): - """Return a sequence of strings. - - Strings are defining the event types to be given to this plugin. - """ - - @abc.abstractmethod - def get_targets(self, conf): - """Return a sequence of oslo.messaging.Target. - - Sequence is defining the exchange and topics to be connected for this - plugin. - :param conf: Configuration. - """ - - @abc.abstractmethod - def process_notification(self, message): - """Return a sequence of Counter instances for the given message. - - :param message: Message to process. - """ - - def info(self, notifications): - """RPC endpoint for notification messages at info level - - When another service sends a notification over the message - bus, this method receives it. - - :param notifications: list of notifications - """ - self._process_notifications('info', notifications) - - def sample(self, notifications): - """RPC endpoint for notification messages at sample level - - When another service sends a notification over the message - bus at sample priority, this method receives it. - - :param notifications: list of notifications - """ - self._process_notifications('sample', notifications) - - def _process_notifications(self, priority, notifications): - for notification in notifications: - try: - notification = messaging.convert_to_old_notification_format( - priority, notification) - self.to_samples_and_publish(notification) - except Exception: - LOG.error(_LE('Fail to process notification'), exc_info=True) - - def to_samples_and_publish(self, notification): - """Return samples produced by *process_notification*. - - Samples produced for the given notification. - :param context: Execution context from the service or RPC call - :param notification: The notification to process. - """ - with self.manager.publisher() as p: - p(list(self.process_notification(notification))) - - -class NonMetricNotificationBase(object): - """Use to mark non-measurement meters - - There are a number of historical non-measurement meters that should really - be captured as events. 
This common base allows us to disable these invalid - meters. - """ - pass - - -class ExtensionLoadError(Exception): - """Error raised when loading a pollster plugin. - - PollsterBase provides a hook, setup_environment, called during pollster - loading to set up required HW/SW dependencies. Any exception from it is - propagated as ExtensionLoadError, and loading of this pollster is skipped. - """ - pass - - -class PollsterPermanentError(Exception): - """Permanent error when polling. - - When an unrecoverable error happens during polling, a pollster can raise - this exception with the failed resources to prevent itself from polling - them any more. Each resource is one of the ``resources`` arguments to - get_samples that caused the polling error. - """ - - def __init__(self, resources): - self.fail_res_list = resources - - -@six.add_metaclass(abc.ABCMeta) -class PollsterBase(PluginBase): - """Base class for plugins that support the polling API.""" - - def setup_environment(self): - """Set up the required environment for the pollster. - - Each subclass may override it for its specific needs. Any exception - raised in this function prevents the pollster from being loaded. - """ - pass - - def __init__(self): - super(PollsterBase, self).__init__() - try: - self.setup_environment() - except Exception as err: - raise ExtensionLoadError(err) - - @abc.abstractproperty - def default_discovery(self): - """Default discovery to use for this pollster. - - There are three ways a pollster can get a list of resources to poll, - listed here in ascending order of precedence: - 1. from the per-agent discovery, - 2. from the per-pollster discovery (defined here), - 3. from the per-pipeline configured discovery and/or per-pipeline - configured static resources. - - If a pollster should only get resources from #1 or #3, this property - should be set to None. - """ - - @abc.abstractmethod - def get_samples(self, manager, cache, resources): - """Return a sequence of Counter instances from polling the resources. - - :param manager: The service manager class invoking the plugin. - :param cache: A dictionary to allow pollsters to pass data - between themselves when recomputing it would be - expensive (e.g., asking another service for a - list of objects). - :param resources: A list of resources the pollster will get data - from. It's up to the specific pollster to decide - how to use it. It is usually supplied by a discovery, - see ``default_discovery`` for more information. - - """ - - @classmethod - def build_pollsters(cls): - """Return a list of (name, pollster) tuples. - - The name is the meter name the pollster returns; the pollster is a - pollster object instance. A pollster implementing this method should - be registered in the ceilometer.builder.xxx namespace instead of - ceilometer.poll.xxx. - """ - return [] - - @classmethod - def get_pollsters_extensions(cls): - """Return a list of stevedore extensions. - - The returned stevedore extensions wrap the pollster object instances - returned by build_pollsters. - """ - extensions = [] - try: - for name, pollster in cls.build_pollsters(): - ext = extension.Extension(name, None, cls, pollster) - extensions.append(ext) - except Exception as err: - raise ExtensionLoadError(err) - return extensions - - -@six.add_metaclass(abc.ABCMeta) -class DiscoveryBase(object): - KEYSTONE_REQUIRED_FOR_SERVICE = None - """Service type required in the keystone catalog to work""" - - @abc.abstractmethod - def discover(self, manager, param=None): - """Discover resources to monitor. 
- - The most fine-grained discovery should be preferred, so that the work - is distributed as evenly as possible among multiple agents (if they - exist). - - For example: - if the pollster can separately poll individual resources, it should - have its own discovery implementation to discover those resources. If - it can only poll per-tenant, then the `TenantDiscovery` should be - used. If even that is not possible, use `EndpointDiscovery` (see - their respective docstrings). - - :param manager: The service manager class invoking the plugin. - :param param: an optional parameter to guide the discovery - """ - - @property - def group_id(self): - """Return the group id of this discovery. - - All running discoveries with the same group_id should return the same - set of resources at a given point in time. By default, a discovery is - put into a global group, meaning that all discoveries of its type - running anywhere in the cloud return the same set of resources. - - This property can be overridden to provide correct grouping of - localized discoveries. For example, compute discovery is localized - to a host, which is reflected in its group_id. - - A None value signifies that this discovery does not want to be part - of workload partitioning at all. - """ - return 'global' diff --git a/ceilometer/api/app.py b/ceilometer/api/app.py index adde6d16..1c8181be 100644 --- a/ceilometer/api/app.py +++ b/ceilometer/api/app.py @@ -36,6 +36,7 @@ OPTS = [ ), ] + API_OPTS = [ cfg.BoolOpt('pecan_debug', default=False, @@ -45,6 +46,12 @@ API_OPTS = [ default=100, help='Default maximum number of items returned by API request.' ), + cfg.IntOpt('workers', + default=1, + min=1, + deprecated_group='DEFAULT', + deprecated_name='api_workers', + help='Number of workers for the API; the default value is 1.'), ] CONF.register_opts(OPTS) @@ -55,7 +62,6 @@ def setup_app(pecan_config=None): # FIXME: Replace DBHook with a hooks.TransactionHook app_hooks = [hooks.ConfigHook(), hooks.DBHook(), - hooks.NotifierHook(), hooks.TranslationHook()] pecan_config = pecan_config or { diff --git a/ceilometer/api/controllers/v2/base.py b/ceilometer/api/controllers/v2/base.py index e3e5f9db..15e30b95 100644 --- a/ceilometer/api/controllers/v2/base.py +++ b/ceilometer/api/controllers/v2/base.py @@ -108,24 +108,6 @@ class Base(wtypes.DynamicBase): getattr(self, k) != wsme.Unset) -class Link(Base): - """A link representation.""" - - href = wtypes.text - "The url of a link" - - rel = wtypes.text - "The name of a link" - - @classmethod - def sample(cls): - return cls(href=('http://localhost:8777/v2/meters/volume?' 
- 'q.field=resource_id&' - 'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='volume' - ) - - class Query(Base): """Query filter.""" diff --git a/ceilometer/api/controllers/v2/capabilities.py b/ceilometer/api/controllers/v2/capabilities.py index 2a8c70d4..474d82e5 100644 --- a/ceilometer/api/controllers/v2/capabilities.py +++ b/ceilometer/api/controllers/v2/capabilities.py @@ -39,8 +39,6 @@ class Capabilities(base.Base): api = {wtypes.text: bool} "A flattened dictionary of API capabilities" - storage = {wtypes.text: bool} - "A flattened dictionary of storage capabilities" event_storage = {wtypes.text: bool} "A flattened dictionary of event storage capabilities" @@ -48,30 +46,8 @@ class Capabilities(base.Base): def sample(cls): return cls( api=_flatten_capabilities({ - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True, - 'quartile': False}}}, 'events': {'query': {'simple': True}}, }), - storage=_flatten_capabilities( - {'storage': {'production_ready': True}}), event_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) @@ -88,13 +64,10 @@ class CapabilitiesController(rest.RestController): """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers - conn = pecan.request.storage_conn event_conn = pecan.request.event_storage_conn - driver_capabilities = conn.get_capabilities().copy() - driver_capabilities['events'] = event_conn.get_capabilities()['events'] - driver_perf = conn.get_storage_capabilities() + driver_capabilities = {'events': + event_conn.get_capabilities()['events']} event_driver_perf = event_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), - storage=_flatten_capabilities(driver_perf), event_storage=_flatten_capabilities( event_driver_perf)) diff --git a/ceilometer/api/controllers/v2/meters.py b/ceilometer/api/controllers/v2/meters.py deleted file mode 100644 index 9aa500eb..00000000 --- a/ceilometer/api/controllers/v2/meters.py +++ /dev/null @@ -1,505 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
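With the meter/resource/sample capabilities removed above, the capabilities controller reports only event capabilities, flattened into single-level dictionaries. A minimal sketch of that flattening, on the assumption that the untouched _flatten_capabilities helper behaves like this recursive walk with colon-joined keys:

    def flatten(capabilities, prefix=''):
        # Collapse {'events': {'query': {'simple': True}}} into
        # {'events:query:simple': True} (assumed key format).
        flat = {}
        for key, value in capabilities.items():
            path = prefix + key
            if isinstance(value, dict):
                flat.update(flatten(value, path + ':'))
            else:
                flat[path] = value
        return flat

    print(flatten({'events': {'query': {'simple': True}}}))
    # {'events:query:simple': True}
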
- -import base64 -import datetime - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import strutils -from oslo_utils import timeutils -import pecan -from pecan import rest -import six -import wsme -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils as v2_utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer.publisher import utils as publisher_utils -from ceilometer import sample -from ceilometer import storage -from ceilometer.storage import base as storage_base -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -class OldSample(base.Base): - """A single measurement for a given meter and resource. - - This class is deprecated in favor of Sample. - """ - - source = wtypes.text - "The ID of the source that identifies where the sample comes from" - - counter_name = wsme.wsattr(wtypes.text, mandatory=True) - "The name of the meter" - # FIXME(dhellmann): Make this meter_name? - - counter_type = wsme.wsattr(wtypes.text, mandatory=True) - "The type of the meter (see :ref:`measurements`)" - # FIXME(dhellmann): Make this meter_type? - - counter_unit = wsme.wsattr(wtypes.text, mandatory=True) - "The unit of measure for the value in counter_volume" - # FIXME(dhellmann): Make this meter_unit? - - counter_volume = wsme.wsattr(float, mandatory=True) - "The actual measured value" - - user_id = wtypes.text - "The ID of the user who last triggered an update to the resource" - - project_id = wtypes.text - "The ID of the project or tenant that owns the resource" - - resource_id = wsme.wsattr(wtypes.text, mandatory=True) - "The ID of the :class:`Resource` for which the measurements are taken" - - timestamp = datetime.datetime - "UTC date and time when the measurement was made" - - recorded_at = datetime.datetime - "When the sample has been recorded." 
- - resource_metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the resource" - - message_id = wtypes.text - "A unique identifier for the sample" - - def __init__(self, counter_volume=None, resource_metadata=None, - timestamp=None, **kwds): - resource_metadata = resource_metadata or {} - if counter_volume is not None: - counter_volume = float(counter_volume) - resource_metadata = v2_utils.flatten_metadata(resource_metadata) - # this is to make it easier for clients to pass a timestamp in - if timestamp and isinstance(timestamp, six.string_types): - timestamp = timeutils.parse_isotime(timestamp) - - super(OldSample, self).__init__(counter_volume=counter_volume, - resource_metadata=resource_metadata, - timestamp=timestamp, **kwds) - - if self.resource_metadata in (wtypes.Unset, None): - self.resource_metadata = {} - - @classmethod - def sample(cls): - return cls(source='openstack', - counter_name='instance', - counter_type='gauge', - counter_unit='instance', - counter_volume=1, - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - resource_metadata={'name1': 'value1', - 'name2': 'value2'}, - message_id='5460acce-4fd6-480d-ab18-9735ec7b1996', - ) - - -class Statistics(base.Base): - """Computed statistics for a query.""" - - groupby = {wtypes.text: wtypes.text} - "Dictionary of field names for group, if groupby statistics are requested" - - unit = wtypes.text - "The unit type of the data set" - - min = float - "The minimum volume seen in the data" - - max = float - "The maximum volume seen in the data" - - avg = float - "The average of all of the volume values seen in the data" - - sum = float - "The total of all of the volume values seen in the data" - - count = int - "The number of samples seen" - - aggregate = {wtypes.text: float} - "The selectable aggregate value(s)" - - duration = float - "The difference, in seconds, between the oldest and newest timestamp" - - duration_start = datetime.datetime - "UTC date and time of the earliest timestamp, or the query start time" - - duration_end = datetime.datetime - "UTC date and time of the oldest timestamp, or the query end time" - - period = int - "The difference, in seconds, between the period start and end" - - period_start = datetime.datetime - "UTC date and time of the period start" - - period_end = datetime.datetime - "UTC date and time of the period end" - - def __init__(self, start_timestamp=None, end_timestamp=None, **kwds): - super(Statistics, self).__init__(**kwds) - self._update_duration(start_timestamp, end_timestamp) - - def _update_duration(self, start_timestamp, end_timestamp): - # "Clamp" the timestamps we return to the original time - # range, excluding the offset. - if (start_timestamp and - self.duration_start and - self.duration_start < start_timestamp): - self.duration_start = start_timestamp - LOG.debug('clamping min timestamp to range') - if (end_timestamp and - self.duration_end and - self.duration_end > end_timestamp): - self.duration_end = end_timestamp - LOG.debug('clamping max timestamp to range') - - # If we got valid timestamps back, compute a duration in seconds. - # - # If the min > max after clamping then we know the - # timestamps on the samples fell outside of the time - # range we care about for the query, so treat them as - # "invalid." 
- # - # If the timestamps are invalid, return None as a - # sentinel indicating that there is something "funny" - # about the range. - if (self.duration_start and - self.duration_end and - self.duration_start <= self.duration_end): - self.duration = timeutils.delta_seconds(self.duration_start, - self.duration_end) - else: - self.duration_start = self.duration_end = self.duration = None - - @classmethod - def sample(cls): - return cls(unit='GiB', - min=1, - max=9, - avg=4.5, - sum=45, - count=10, - duration_start=datetime.datetime(2013, 1, 4, 16, 42), - duration_end=datetime.datetime(2013, 1, 4, 16, 47), - period=7200, - period_start=datetime.datetime(2013, 1, 4, 16, 00), - period_end=datetime.datetime(2013, 1, 4, 18, 00), - ) - - -class Aggregate(base.Base): - - func = wsme.wsattr(wtypes.text, mandatory=True) - "The aggregation function name" - - param = wsme.wsattr(wtypes.text, default=None) - "The parameter to the aggregation function" - - def __init__(self, **kwargs): - super(Aggregate, self).__init__(**kwargs) - - @staticmethod - def validate(aggregate): - valid_agg = (storage_base.Connection.CAPABILITIES.get('statistics', {}) - .get('aggregation', {}).get('selectable', {}).keys()) - if aggregate.func not in valid_agg: - msg = _('Invalid aggregation function: %s') % aggregate.func - raise base.ClientSideError(msg) - return aggregate - - @classmethod - def sample(cls): - return cls(func='cardinality', - param='resource_id') - - -def _validate_groupby_fields(groupby_fields): - """Checks that the list of groupby fields from the request is valid. - - If all fields are valid, returns the fields with duplicates removed. - """ - # NOTE(terriyu): Currently, metadata fields are supported in our - # group by statistics implementation only for mongodb - valid_fields = set(['user_id', 'resource_id', 'project_id', 'source', - 'resource_metadata.instance_type']) - - invalid_fields = set(groupby_fields) - valid_fields - if invalid_fields: - raise wsme.exc.UnknownArgument(invalid_fields, - "Invalid groupby fields") - - # Remove duplicate fields - # NOTE(terriyu): This assumes that we don't care about the order of the - # group by fields. - return list(set(groupby_fields)) - - -class MeterController(rest.RestController): - """Manages operations on a single meter.""" - _custom_actions = { - 'statistics': ['GET'], - } - - def __init__(self, meter_name): - pecan.request.context['meter_name'] = meter_name - self.meter_name = meter_name - - @wsme_pecan.wsexpose([OldSample], [base.Query], int) - def get_all(self, q=None, limit=None): - """Return samples for the meter. - - :param q: Filter rules for the data to be returned. - :param limit: Maximum number of samples to return. - """ - - rbac.enforce('get_samples', pecan.request) - - q = q or [] - limit = v2_utils.enforce_limit(limit) - kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) - kwargs['meter'] = self.meter_name - f = storage.SampleFilter(**kwargs) - return [OldSample.from_db_model(e) - for e in pecan.request.storage_conn.get_samples(f, limit=limit) - ] - - @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201) - def post(self, direct='', samples=None): - """Post a list of new Samples to Telemetry. - - :param direct: a flag indicating whether the samples should be posted - directly to storage or not. - :param samples: a list of samples within the request body. 
- """ - rbac.enforce('create_samples', pecan.request) - - direct = strutils.bool_from_string(direct) - if not samples: - msg = _('Samples should be included in request body') - raise base.ClientSideError(msg) - - now = timeutils.utcnow() - auth_project = rbac.get_limited_to_project(pecan.request.headers) - def_source = pecan.request.cfg.sample_source - def_project_id = pecan.request.headers.get('X-Project-Id') - def_user_id = pecan.request.headers.get('X-User-Id') - - published_samples = [] - for s in samples: - if self.meter_name != s.counter_name: - raise wsme.exc.InvalidInput('counter_name', s.counter_name, - 'should be %s' % self.meter_name) - - if s.message_id: - raise wsme.exc.InvalidInput('message_id', s.message_id, - 'The message_id must not be set') - - if s.counter_type not in sample.TYPES: - raise wsme.exc.InvalidInput('counter_type', s.counter_type, - 'The counter type must be: ' + - ', '.join(sample.TYPES)) - - s.user_id = (s.user_id or def_user_id) - s.project_id = (s.project_id or def_project_id) - s.source = '%s:%s' % (s.project_id, (s.source or def_source)) - s.timestamp = (s.timestamp or now) - - if auth_project and auth_project != s.project_id: - # non admin user trying to cross post to another project_id - auth_msg = 'can not post samples to other projects' - raise wsme.exc.InvalidInput('project_id', s.project_id, - auth_msg) - - published_sample = sample.Sample( - name=s.counter_name, - type=s.counter_type, - unit=s.counter_unit, - volume=s.counter_volume, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp.isoformat(), - resource_metadata=utils.restore_nesting(s.resource_metadata, - separator='.'), - source=s.source) - s.message_id = published_sample.id - - sample_dict = publisher_utils.meter_message_from_counter( - published_sample, cfg.CONF.publisher.telemetry_secret) - if direct: - ts = timeutils.parse_isotime(sample_dict['timestamp']) - sample_dict['timestamp'] = timeutils.normalize_time(ts) - pecan.request.storage_conn.record_metering_data(sample_dict) - else: - published_samples.append(sample_dict) - if not direct: - pecan.request.notifier.sample( - {'user': def_user_id, - 'tenant': def_project_id, - 'is_admin': True}, - 'telemetry.api', - {'samples': published_samples}) - - return samples - - @wsme_pecan.wsexpose([Statistics], - [base.Query], [six.text_type], int, [Aggregate]) - def statistics(self, q=None, groupby=None, period=None, aggregate=None): - """Computes the statistics of the samples in the time range given. - - :param q: Filter rules for the data to be returned. - :param groupby: Fields for group by aggregation - :param period: Returned result will be an array of statistics for a - period long of that number of seconds. - :param aggregate: The selectable aggregation functions to be applied. - """ - - rbac.enforce('compute_statistics', pecan.request) - - q = q or [] - groupby = groupby or [] - aggregate = aggregate or [] - - if period and period < 0: - raise base.ClientSideError(_("Period must be positive.")) - - kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__) - kwargs['meter'] = self.meter_name - f = storage.SampleFilter(**kwargs) - g = _validate_groupby_fields(groupby) - - aggregate = utils.uniq(aggregate, ['func', 'param']) - # Find the original timestamp in the query to use for clamping - # the duration returned in the statistics. 
- start = end = None - for i in q: - if i.field == 'timestamp' and i.op in ('lt', 'le'): - end = timeutils.parse_isotime(i.value).replace( - tzinfo=None) - elif i.field == 'timestamp' and i.op in ('gt', 'ge'): - start = timeutils.parse_isotime(i.value).replace( - tzinfo=None) - - try: - computed = pecan.request.storage_conn.get_meter_statistics( - f, period, g, aggregate) - return [Statistics(start_timestamp=start, - end_timestamp=end, - **c.as_dict()) - for c in computed] - except OverflowError as e: - params = dict(period=period, err=e) - raise base.ClientSideError( - _("Invalid period %(period)s: %(err)s") % params) - - -class Meter(base.Base): - """One category of measurements.""" - - name = wtypes.text - "The unique name for the meter" - - type = wtypes.Enum(str, *sample.TYPES) - "The meter type (see :ref:`measurements`)" - - unit = wtypes.text - "The unit of measure" - - resource_id = wtypes.text - "The ID of the :class:`Resource` for which the measurements are taken" - - project_id = wtypes.text - "The ID of the project or tenant that owns the resource" - - user_id = wtypes.text - "The ID of the user who last triggered an update to the resource" - - source = wtypes.text - "The ID of the source that identifies where the meter comes from" - - meter_id = wtypes.text - "The unique identifier for the meter" - - def __init__(self, **kwargs): - meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name']) - # meter_id is of type Unicode but base64.encodestring() only accepts - # strings. See bug #1333177 - meter_id = base64.b64encode(meter_id.encode('utf-8')) - kwargs['meter_id'] = meter_id - super(Meter, self).__init__(**kwargs) - - @classmethod - def sample(cls): - return cls(name='instance', - type='gauge', - unit='instance', - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - source='openstack', - ) - - -class MetersController(rest.RestController): - """Works on meters.""" - - @pecan.expose() - def _lookup(self, meter_name, *remainder): - return MeterController(meter_name), remainder - - @wsme_pecan.wsexpose([Meter], [base.Query], int, str) - def get_all(self, q=None, limit=None, unique=''): - """Return all known meters, based on the data recorded so far. - - :param q: Filter rules for the meters to be returned. - :param unique: flag to indicate unique meters to be returned. - """ - - rbac.enforce('get_meters', pecan.request) - - q = q or [] - - # Timestamp field is not supported for Meter queries - limit = v2_utils.enforce_limit(limit) - kwargs = v2_utils.query_to_kwargs( - q, pecan.request.storage_conn.get_meters, - ['limit'], allow_timestamps=False) - return [Meter.from_db_model(m) - for m in pecan.request.storage_conn.get_meters( - limit=limit, unique=strutils.bool_from_string(unique), - **kwargs)] diff --git a/ceilometer/api/controllers/v2/query.py b/ceilometer/api/controllers/v2/query.py deleted file mode 100644 index 1c5af060..00000000 --- a/ceilometer/api/controllers/v2/query.py +++ /dev/null @@ -1,359 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -import jsonschema -from oslo_log import log -from oslo_utils import timeutils -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import samples -from ceilometer.api.controllers.v2 import utils as v2_utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer import storage -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -class ComplexQuery(base.Base): - """Holds a sample query encoded in json.""" - - filter = wtypes.text - "The filter expression encoded in json." - - orderby = wtypes.text - "List of single-element dicts for specifying the ordering of the results." - - limit = int - "The maximum number of results to be returned." - - @classmethod - def sample(cls): - return cls(filter='{"and": [{"and": [{"=": ' + - '{"counter_name": "cpu_util"}}, ' + - '{">": {"counter_volume": 0.23}}, ' + - '{"<": {"counter_volume": 0.26}}]}, ' + - '{"or": [{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:00:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + - '{"and": [{">": ' + - '{"timestamp": "2013-12-01T18:30:00"}}, ' + - '{"<": ' + - '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', - orderby='[{"counter_volume": "ASC"}, ' + - '{"timestamp": "DESC"}]', - limit=42 - ) - - -def _list_to_regexp(items, regexp_prefix=""): - regexp = ["^%s$" % item for item in items] - regexp = regexp_prefix + "|".join(regexp) - return regexp - - -class ValidatedComplexQuery(object): - complex_operators = ["and", "or"] - order_directions = ["asc", "desc"] - simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] - regexp_prefix = "(?i)" - - complex_ops = _list_to_regexp(complex_operators, regexp_prefix) - simple_ops = _list_to_regexp(simple_ops, regexp_prefix) - order_directions = _list_to_regexp(order_directions, regexp_prefix) - - timestamp_fields = ["timestamp", "state_timestamp"] - - def __init__(self, query, db_model, additional_name_mapping=None, - metadata_allowed=False): - additional_name_mapping = additional_name_mapping or {} - self.name_mapping = {"user": "user_id", - "project": "project_id"} - self.name_mapping.update(additional_name_mapping) - valid_keys = db_model.get_field_names() - valid_keys = list(valid_keys) + list(self.name_mapping.keys()) - valid_fields = _list_to_regexp(valid_keys) - - if metadata_allowed: - valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" - else: - valid_filter_fields = valid_fields - - schema_value = { - "oneOf": [{"type": "string"}, - {"type": "number"}, - {"type": "boolean"}], - "minProperties": 1, - "maxProperties": 1} - - schema_value_in = { - "type": "array", - "items": {"oneOf": [{"type": "string"}, - {"type": "number"}]}, - "minItems": 1} - - schema_field = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_field_in = { - "type": "object", - "patternProperties": {valid_filter_fields: schema_value_in}, - 
"additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_in = { - "type": "object", - "patternProperties": {"(?i)^in$": schema_field_in}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_leaf_simple_ops = { - "type": "object", - "patternProperties": {self.simple_ops: schema_field}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_and_or_array = { - "type": "array", - "items": {"$ref": "#"}, - "minItems": 2} - - schema_and_or = { - "type": "object", - "patternProperties": {self.complex_ops: schema_and_or_array}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - schema_not = { - "type": "object", - "patternProperties": {"(?i)^not$": {"$ref": "#"}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1} - - self.schema = { - "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, - {"$ref": "#/definitions/leaf_in"}, - {"$ref": "#/definitions/and_or"}, - {"$ref": "#/definitions/not"}], - "minProperties": 1, - "maxProperties": 1, - "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, - "leaf_in": schema_leaf_in, - "and_or": schema_and_or, - "not": schema_not}} - - self.orderby_schema = { - "type": "array", - "items": { - "type": "object", - "patternProperties": - {valid_fields: - {"type": "string", - "pattern": self.order_directions}}, - "additionalProperties": False, - "minProperties": 1, - "maxProperties": 1}} - - self.original_query = query - - def validate(self, visibility_field): - """Validates the query content and does the necessary conversions.""" - if self.original_query.filter is wtypes.Unset: - self.filter_expr = None - else: - try: - self.filter_expr = json.loads(self.original_query.filter) - self._validate_filter(self.filter_expr) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Filter expression not valid: %s") % e) - self._replace_isotime_with_datetime(self.filter_expr) - self._convert_operator_to_lower_case(self.filter_expr) - self._normalize_field_names_for_db_model(self.filter_expr) - - self._force_visibility(visibility_field) - - if self.original_query.orderby is wtypes.Unset: - self.orderby = None - else: - try: - self.orderby = json.loads(self.original_query.orderby) - self._validate_orderby(self.orderby) - except (ValueError, jsonschema.exceptions.ValidationError) as e: - raise base.ClientSideError( - _("Order-by expression not valid: %s") % e) - self._convert_orderby_to_lower_case(self.orderby) - self._normalize_field_names_in_orderby(self.orderby) - - self.limit = (None if self.original_query.limit is wtypes.Unset - else self.original_query.limit) - - self.limit = v2_utils.enforce_limit(self.limit) - - @staticmethod - def _convert_orderby_to_lower_case(orderby): - for orderby_field in orderby: - utils.lowercase_values(orderby_field) - - def _normalize_field_names_in_orderby(self, orderby): - for orderby_field in orderby: - self._replace_field_names(orderby_field) - - def _traverse_postorder(self, tree, visitor): - op = list(tree.keys())[0] - if op.lower() in self.complex_operators: - for i, operand in enumerate(tree[op]): - self._traverse_postorder(operand, visitor) - if op.lower() == "not": - self._traverse_postorder(tree[op], visitor) - - visitor(tree) - - def _check_cross_project_references(self, own_project_id, - visibility_field): - """Do not allow other than own_project_id.""" - def check_project_id(subfilter): - op, value = 
list(subfilter.items())[0] - if (op.lower() not in self.complex_operators - and list(value.keys())[0] == visibility_field - and value[visibility_field] != own_project_id): - raise base.ProjectNotAuthorized(value[visibility_field]) - - self._traverse_postorder(self.filter_expr, check_project_id) - - def _force_visibility(self, visibility_field): - """Force visibility field. - - If the tenant is not admin insert an extra - "and =" clause to the query. - """ - authorized_project = rbac.get_limited_to_project(pecan.request.headers) - is_admin = authorized_project is None - if not is_admin: - self._restrict_to_project(authorized_project, visibility_field) - self._check_cross_project_references(authorized_project, - visibility_field) - - def _restrict_to_project(self, project_id, visibility_field): - restriction = {"=": {visibility_field: project_id}} - if self.filter_expr is None: - self.filter_expr = restriction - else: - self.filter_expr = {"and": [restriction, self.filter_expr]} - - def _replace_isotime_with_datetime(self, filter_expr): - def replace_isotime(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - field = list(value.keys())[0] - if field in self.timestamp_fields: - date_time = self._convert_to_datetime(subfilter[op][field]) - subfilter[op][field] = date_time - - self._traverse_postorder(filter_expr, replace_isotime) - - def _normalize_field_names_for_db_model(self, filter_expr): - def _normalize_field_names(subfilter): - op, value = list(subfilter.items())[0] - if op.lower() not in self.complex_operators: - self._replace_field_names(value) - self._traverse_postorder(filter_expr, - _normalize_field_names) - - def _replace_field_names(self, subfilter): - field, value = list(subfilter.items())[0] - if field in self.name_mapping: - del subfilter[field] - subfilter[self.name_mapping[field]] = value - if field.startswith("metadata."): - del subfilter[field] - subfilter["resource_" + field] = value - - def _convert_operator_to_lower_case(self, filter_expr): - self._traverse_postorder(filter_expr, utils.lowercase_keys) - - @staticmethod - def _convert_to_datetime(isotime): - try: - date_time = timeutils.parse_isotime(isotime) - date_time = date_time.replace(tzinfo=None) - return date_time - except ValueError: - LOG.exception(_("String %s is not a valid isotime") % isotime) - msg = _('Failed to parse the timestamp value %s') % isotime - raise base.ClientSideError(msg) - - def _validate_filter(self, filter_expr): - jsonschema.validate(filter_expr, self.schema) - - def _validate_orderby(self, orderby_expr): - jsonschema.validate(orderby_expr, self.orderby_schema) - - -class QuerySamplesController(rest.RestController): - """Provides complex query possibilities for samples.""" - - @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery) - def post(self, body): - """Define query for retrieving Sample data. - - :param body: Query rules for the samples to be returned. 
- """ - - rbac.enforce('query_sample', pecan.request) - - sample_name_mapping = {"resource": "resource_id", - "meter": "counter_name", - "type": "counter_type", - "unit": "counter_unit", - "volume": "counter_volume"} - - query = ValidatedComplexQuery(body, - storage.models.Sample, - sample_name_mapping, - metadata_allowed=True) - query.validate(visibility_field="project_id") - conn = pecan.request.storage_conn - return [samples.Sample.from_db_model(s) - for s in conn.query_samples(query.filter_expr, - query.orderby, - query.limit)] - - -class QueryController(rest.RestController): - - samples = QuerySamplesController() diff --git a/ceilometer/api/controllers/v2/resources.py b/ceilometer/api/controllers/v2/resources.py deleted file mode 100644 index b9918c21..00000000 --- a/ceilometer/api/controllers/v2/resources.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import urllib - -import pecan -from pecan import rest -import six -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils -from ceilometer.api import rbac -from ceilometer.i18n import _ - - -class Resource(base.Base): - """An externally defined object for which samples have been received.""" - - resource_id = wtypes.text - "The unique identifier for the resource" - - project_id = wtypes.text - "The ID of the owning project or tenant" - - user_id = wtypes.text - "The ID of the user who created the resource or updated it last" - - first_sample_timestamp = datetime.datetime - "UTC date & time not later than the first sample known for this resource" - - last_sample_timestamp = datetime.datetime - "UTC date & time not earlier than the last sample known for this resource" - - metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the resource" - - links = [base.Link] - "A list containing a self link and associated meter links" - - source = wtypes.text - "The source where the resource come from" - - def __init__(self, metadata=None, **kwds): - metadata = metadata or {} - metadata = utils.flatten_metadata(metadata) - super(Resource, self).__init__(metadata=metadata, **kwds) - - @classmethod - def sample(cls): - return cls( - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - source="openstack", - metadata={'name1': 'value1', - 'name2': 'value2'}, - links=[ - base.Link(href=('http://localhost:8777/v2/resources/' - 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='self'), - base.Link(href=('http://localhost:8777/v2/meters/volume?' 
- 'q.field=resource_id&q.value=' - 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'), - rel='volume') - ], - ) - - -class ResourcesController(rest.RestController): - """Works on resources.""" - - @staticmethod - def _make_link(rel_name, url, type, type_arg, query=None): - query_str = '' - if query: - query_str = '?q.field=%s&q.value=%s' % (query['field'], - query['value']) - return base.Link(href='%s/v2/%s/%s%s' % (url, type, - type_arg, query_str), - rel=rel_name) - - def _resource_links(self, resource_id, meter_links=1): - links = [self._make_link('self', pecan.request.application_url, - 'resources', resource_id)] - if meter_links: - for meter in pecan.request.storage_conn.get_meters( - resource=resource_id): - query = {'field': 'resource_id', 'value': resource_id} - links.append(self._make_link(meter.name, - pecan.request.application_url, - 'meters', meter.name, - query=query)) - return links - - @wsme_pecan.wsexpose(Resource, six.text_type) - def get_one(self, resource_id): - """Retrieve details about one resource. - - :param resource_id: The UUID of the resource. - """ - - rbac.enforce('get_resource', pecan.request) - # In case we have special character in resource id, for example, swift - # can generate samples with resource id like - # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance - resource_id = urllib.unquote(resource_id) - - authorized_project = rbac.get_limited_to_project(pecan.request.headers) - resources = list(pecan.request.storage_conn.get_resources( - resource=resource_id, project=authorized_project)) - if not resources: - raise base.EntityNotFound(_('Resource'), resource_id) - return Resource.from_db_and_links(resources[0], - self._resource_links(resource_id)) - - @wsme_pecan.wsexpose([Resource], [base.Query], int, int) - def get_all(self, q=None, limit=None, meter_links=1): - """Retrieve definitions of all of the resources. - - :param q: Filter rules for the resources to be returned. - :param meter_links: option to include related meter links - """ - - rbac.enforce('get_resources', pecan.request) - - q = q or [] - limit = utils.enforce_limit(limit) - kwargs = utils.query_to_kwargs( - q, pecan.request.storage_conn.get_resources, ['limit']) - resources = [ - Resource.from_db_and_links(r, - self._resource_links(r.resource_id, - meter_links)) - for r in pecan.request.storage_conn.get_resources(limit=limit, - **kwargs)] - return resources diff --git a/ceilometer/api/controllers/v2/root.py b/ceilometer/api/controllers/v2/root.py index 2387f9f5..1701325e 100644 --- a/ceilometer/api/controllers/v2/root.py +++ b/ceilometer/api/controllers/v2/root.py @@ -18,82 +18,8 @@ # License for the specific language governing permissions and limitations # under the License. -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -from oslo_utils import strutils -import pecan - from ceilometer.api.controllers.v2 import capabilities from ceilometer.api.controllers.v2 import events -from ceilometer.api.controllers.v2 import meters -from ceilometer.api.controllers.v2 import query -from ceilometer.api.controllers.v2 import resources -from ceilometer.api.controllers.v2 import samples -from ceilometer.i18n import _, _LW -from ceilometer import keystone_client - - -API_OPTS = [ - cfg.BoolOpt('gnocchi_is_enabled', - default=None, - help=('Set True to disable resource/meter/sample URLs. ' - 'Default autodetection by querying keystone.')), - cfg.BoolOpt('aodh_is_enabled', - default=None, - help=('Set True to redirect alarms URLs to aodh. 
' - 'Default autodetection by querying keystone.')), - cfg.StrOpt('aodh_url', - default=None, - help=('The endpoint of Aodh to redirect alarms URLs ' - 'to Aodh API. Default autodetection by querying ' - 'keystone.')), -] - -cfg.CONF.register_opts(API_OPTS, group='api') -cfg.CONF.import_opt('meter_dispatchers', 'ceilometer.dispatcher') - -LOG = log.getLogger(__name__) - - -def gnocchi_abort(): - pecan.abort(410, ("This telemetry installation is configured to use " - "Gnocchi. Please use the Gnocchi API available on " - "the metric endpoint to retrieve data.")) - - -def aodh_abort(): - pecan.abort(410, _("alarms URLs is unavailable when Aodh is " - "disabled or unavailable.")) - - -def aodh_redirect(url): - # NOTE(sileht): we use 307 and not 301 or 302 to allow - # client to redirect POST/PUT/DELETE/... - # FIXME(sileht): it would be better to use 308, but webob - # doesn't handle it :( - # https://github.com/Pylons/webob/pull/207 - pecan.redirect(location=url + pecan.request.path_qs, - code=307) - - -class QueryController(object): - def __init__(self, gnocchi_is_enabled=False, aodh_url=None): - self.gnocchi_is_enabled = gnocchi_is_enabled - self.aodh_url = aodh_url - - @pecan.expose() - def _lookup(self, kind, *remainder): - if kind == 'alarms' and self.aodh_url: - aodh_redirect(self.aodh_url) - elif kind == 'alarms': - aodh_abort() - elif kind == 'samples' and self.gnocchi_is_enabled: - gnocchi_abort() - elif kind == 'samples': - return query.QuerySamplesController(), remainder - else: - pecan.abort(404) class V2Controller(object): @@ -102,94 +28,3 @@ class V2Controller(object): event_types = events.EventTypesController() events = events.EventsController() capabilities = capabilities.CapabilitiesController() - - def __init__(self): - self._gnocchi_is_enabled = None - self._aodh_is_enabled = None - self._aodh_url = None - - @property - def gnocchi_is_enabled(self): - if self._gnocchi_is_enabled is None: - if cfg.CONF.api.gnocchi_is_enabled is not None: - self._gnocchi_is_enabled = cfg.CONF.api.gnocchi_is_enabled - - elif ("gnocchi" not in cfg.CONF.meter_dispatchers - or "database" in cfg.CONF.meter_dispatchers): - self._gnocchi_is_enabled = False - else: - try: - catalog = keystone_client.get_service_catalog( - keystone_client.get_client()) - catalog.url_for(service_type='metric') - except exceptions.EndpointNotFound: - self._gnocchi_is_enabled = False - except exceptions.ClientException: - LOG.warning(_LW("Can't connect to keystone, assuming " - "gnocchi is disabled and retry later")) - else: - self._gnocchi_is_enabled = True - LOG.warning(_LW("ceilometer-api started with gnocchi " - "enabled. The resources/meters/samples " - "URLs are disabled.")) - return self._gnocchi_is_enabled - - @property - def aodh_url(self): - if self._aodh_url is None: - if cfg.CONF.api.aodh_is_enabled is False: - self._aodh_url = "" - elif cfg.CONF.api.aodh_url is not None: - self._aodh_url = self._normalize_aodh_url( - cfg.CONF.api.aodh_url) - else: - try: - catalog = keystone_client.get_service_catalog( - keystone_client.get_client()) - self._aodh_url = self._normalize_aodh_url( - catalog.url_for(service_type='alarming')) - except exceptions.EndpointNotFound: - self._aodh_url = "" - except exceptions.ClientException: - LOG.warning(_LW("Can't connect to keystone, assuming aodh " - "is disabled and retry later.")) - else: - LOG.warning(_LW("ceilometer-api started with aodh " - "enabled. 
Alarms URLs will be redirected " - "to aodh endpoint.")) - return self._aodh_url - - @pecan.expose() - def _lookup(self, kind, *remainder): - if (kind in ['meters', 'resources', 'samples'] - and self.gnocchi_is_enabled): - if kind == 'meters' and pecan.request.method == 'POST': - direct = pecan.request.params.get('direct', '') - if strutils.bool_from_string(direct): - pecan.abort(400, _('direct option cannot be true when ' - 'Gnocchi is enabled.')) - return meters.MetersController(), remainder - gnocchi_abort() - elif kind == 'meters': - return meters.MetersController(), remainder - elif kind == 'resources': - return resources.ResourcesController(), remainder - elif kind == 'samples': - return samples.SamplesController(), remainder - elif kind == 'query': - return QueryController( - gnocchi_is_enabled=self.gnocchi_is_enabled, - aodh_url=self.aodh_url, - ), remainder - elif kind == 'alarms' and (not self.aodh_url): - aodh_abort() - elif kind == 'alarms' and self.aodh_url: - aodh_redirect(self.aodh_url) - else: - pecan.abort(404) - - @staticmethod - def _normalize_aodh_url(url): - if url.endswith("/"): - return url[:-1] - return url diff --git a/ceilometer/api/controllers/v2/samples.py b/ceilometer/api/controllers/v2/samples.py deleted file mode 100644 index 05ded82f..00000000 --- a/ceilometer/api/controllers/v2/samples.py +++ /dev/null @@ -1,145 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 IBM Corp. -# Copyright 2013 eNovance -# Copyright Ericsson AB 2013. All rights reserved -# Copyright 2014 Hewlett-Packard Company -# Copyright 2015 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import pecan -from pecan import rest -from wsme import types as wtypes -import wsmeext.pecan as wsme_pecan - -from ceilometer.api.controllers.v2 import base -from ceilometer.api.controllers.v2 import utils -from ceilometer.api import rbac -from ceilometer.i18n import _ -from ceilometer import sample -from ceilometer import storage - - -class Sample(base.Base): - """One measurement.""" - - id = wtypes.text - "The unique identifier for the sample." - - meter = wtypes.text - "The meter name this sample is for." - - type = wtypes.Enum(str, *sample.TYPES) - "The meter type (see :ref:`meter_types`)" - - unit = wtypes.text - "The unit of measure." - - volume = float - "The metered value." - - user_id = wtypes.text - "The user this sample was taken for." - - project_id = wtypes.text - "The project this sample was taken for." - - resource_id = wtypes.text - "The :class:`Resource` this sample was taken for." - - source = wtypes.text - "The source that identifies where the sample comes from." - - timestamp = datetime.datetime - "When the sample has been generated." - - recorded_at = datetime.datetime - "When the sample has been recorded." - - metadata = {wtypes.text: wtypes.text} - "Arbitrary metadata associated with the sample." 
- - @classmethod - def from_db_model(cls, m): - return cls(id=m.message_id, - meter=m.counter_name, - type=m.counter_type, - unit=m.counter_unit, - volume=m.counter_volume, - user_id=m.user_id, - project_id=m.project_id, - resource_id=m.resource_id, - source=m.source, - timestamp=m.timestamp, - recorded_at=m.recorded_at, - metadata=utils.flatten_metadata(m.resource_metadata)) - - @classmethod - def sample(cls): - return cls(id=str(uuid.uuid1()), - meter='instance', - type='gauge', - unit='instance', - volume=1, - resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', - project_id='35b17138-b364-4e6a-a131-8f3099c5be68', - user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff', - timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), - source='openstack', - metadata={'name1': 'value1', - 'name2': 'value2'}, - ) - - -class SamplesController(rest.RestController): - """Controller managing the samples.""" - - @wsme_pecan.wsexpose([Sample], [base.Query], int) - def get_all(self, q=None, limit=None): - """Return all known samples, based on the data recorded so far. - - :param q: Filter rules for the samples to be returned. - :param limit: Maximum number of samples to be returned. - """ - - rbac.enforce('get_samples', pecan.request) - - q = q or [] - - limit = utils.enforce_limit(limit) - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - f = storage.SampleFilter(**kwargs) - return map(Sample.from_db_model, - pecan.request.storage_conn.get_samples(f, limit=limit)) - - @wsme_pecan.wsexpose(Sample, wtypes.text) - def get_one(self, sample_id): - """Return a sample. - - :param sample_id: the id of the sample. - """ - - rbac.enforce('get_sample', pecan.request) - - f = storage.SampleFilter(message_id=sample_id) - - samples = list(pecan.request.storage_conn.get_samples(f)) - if len(samples) < 1: - raise base.EntityNotFound(_('Sample'), sample_id) - - return Sample.from_db_model(samples[0]) diff --git a/ceilometer/api/controllers/v2/utils.py b/ceilometer/api/controllers/v2/utils.py index 88142cbd..03f89a2a 100644 --- a/ceilometer/api/controllers/v2/utils.py +++ b/ceilometer/api/controllers/v2/utils.py @@ -18,22 +18,15 @@ # License for the specific language governing permissions and limitations # under the License. -import copy -import datetime import functools -import inspect from oslo_config import cfg from oslo_log import log -from oslo_utils import timeutils import pecan -import six -import wsme from ceilometer.api.controllers.v2 import base from ceilometer.api import rbac from ceilometer.i18n import _, _LI -from ceilometer import utils LOG = log.getLogger(__name__) cfg.CONF.import_opt('default_api_return_limit', 'ceilometer.api.app', @@ -61,265 +54,6 @@ def get_auth_project(on_behalf_of=None): return auth_project -def sanitize_query(query, db_func, on_behalf_of=None): - """Check the query. - - See if: - 1) the request is coming from admin - then allow full visibility - 2) non-admin - make sure that the query includes the requester's project. - """ - q = copy.copy(query) - - auth_project = get_auth_project(on_behalf_of) - if auth_project: - _verify_query_segregation(q, auth_project) - - proj_q = [i for i in q if i.field == 'project_id'] - valid_keys = inspect.getargspec(db_func)[0] - if not proj_q and 'on_behalf_of' not in valid_keys: - # The user is restricted, but they didn't specify a project - # so add it for them. 
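# For illustration only -- not part of this patch. A simplified model of the
# sanitize_query() behaviour described above (the on_behalf_of handling is
# omitted); queries are reduced to (field, op, value) tuples for brevity.

def sanitize(query, auth_project):
    """Admins pass through; restricted users gain a project_id constraint."""
    if auth_project is None:  # admin request: full visibility
        return list(query)
    q = list(query)
    if not any(field == 'project_id' for field, _, _ in q):
        q.append(('project_id', 'eq', auth_project))
    return q

# Example:
#     sanitize([('meter', 'eq', 'cpu')], auth_project='p1')
#     -> [('meter', 'eq', 'cpu'), ('project_id', 'eq', 'p1')]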
- q.append(base.Query(field='project_id', - op='eq', - value=auth_project)) - return q - - -def _verify_query_segregation(query, auth_project=None): - """Ensure non-admin queries are not constrained to another project.""" - auth_project = (auth_project or - rbac.get_limited_to_project(pecan.request.headers)) - - if not auth_project: - return - - for q in query: - if q.field in ('project', 'project_id') and auth_project != q.value: - raise base.ProjectNotAuthorized(q.value) - - -def validate_query(query, db_func, internal_keys=None, - allow_timestamps=True): - """Validates the syntax of the query and verifies the query. - - Verification check if the query request is authorized for the included - project. - :param query: Query expression that should be validated - :param db_func: the function on the storage level, of which arguments - will form the valid_keys list, which defines the valid fields for a - query expression - :param internal_keys: internally used field names, that should not be - used for querying - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable for this query or not - - :raises InvalidInput: if an operator is not supported for a given field - :raises InvalidInput: if timestamp constraints are allowed, but - search_offset was included without timestamp constraint - :raises: UnknownArgument: if a field name is not a timestamp field, nor - in the list of valid keys - """ - - internal_keys = internal_keys or [] - _verify_query_segregation(query) - - valid_keys = inspect.getargspec(db_func)[0] - - internal_timestamp_keys = ['end_timestamp', 'start_timestamp', - 'end_timestamp_op', 'start_timestamp_op'] - if 'start_timestamp' in valid_keys: - internal_keys += internal_timestamp_keys - valid_keys += ['timestamp', 'search_offset'] - internal_keys.append('self') - internal_keys.append('metaquery') - valid_keys = set(valid_keys) - set(internal_keys) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource'} - - has_timestamp_query = _validate_timestamp_fields(query, - 'timestamp', - ('lt', 'le', 'gt', 'ge'), - allow_timestamps) - has_search_offset_query = _validate_timestamp_fields(query, - 'search_offset', - 'eq', - allow_timestamps) - - if has_search_offset_query and not has_timestamp_query: - raise wsme.exc.InvalidInput('field', 'search_offset', - "search_offset cannot be used without " + - "timestamp") - - def _is_field_metadata(field): - return (field.startswith('metadata.') or - field.startswith('resource_metadata.')) - - for i in query: - if i.field not in ('timestamp', 'search_offset'): - key = translation.get(i.field, i.field) - operator = i.op - if key in valid_keys or _is_field_metadata(i.field): - if operator == 'eq': - if key == 'enabled': - i._get_value_as_type('boolean') - elif _is_field_metadata(key): - i._get_value_as_type() - else: - raise wsme.exc.InvalidInput('op', i.op, - 'unimplemented operator for ' - '%s' % i.field) - else: - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (query, sorted(valid_keys)) - raise wsme.exc.UnknownArgument(key, msg) - - -def _validate_timestamp_fields(query, field_name, operator_list, - allow_timestamps): - """Validates the timestamp related constraints in a query if there are any. 
- - :param query: query expression that may contain the timestamp fields - :param field_name: timestamp name, which should be checked (timestamp, - search_offset) - :param operator_list: list of operators that are supported for that - timestamp, which was specified in the parameter field_name - :param allow_timestamps: defines whether the timestamp-based constraint is - applicable to this query or not - - :returns: True, if there was a timestamp constraint, containing - a timestamp field named as defined in field_name, in the query and it - was allowed and syntactically correct. - :returns: False, if there wasn't timestamp constraint, containing a - timestamp field named as defined in field_name, in the query - - :raises InvalidInput: if an operator is unsupported for a given timestamp - field - :raises UnknownArgument: if the timestamp constraint is not allowed in - the query - """ - - for item in query: - if item.field == field_name: - # If *timestamp* or *search_offset* field was specified in the - # query, but timestamp is not supported on that resource, on - # which the query was invoked, then raise an exception. - if not allow_timestamps: - raise wsme.exc.UnknownArgument(field_name, - "not valid for " + - "this resource") - if item.op not in operator_list: - raise wsme.exc.InvalidInput('op', item.op, - 'unimplemented operator for %s' % - item.field) - return True - return False - - -def query_to_kwargs(query, db_func, internal_keys=None, - allow_timestamps=True): - validate_query(query, db_func, internal_keys=internal_keys, - allow_timestamps=allow_timestamps) - query = sanitize_query(query, db_func) - translation = {'user_id': 'user', - 'project_id': 'project', - 'resource_id': 'resource'} - stamp = {} - metaquery = {} - kwargs = {} - for i in query: - if i.field == 'timestamp': - if i.op in ('lt', 'le'): - stamp['end_timestamp'] = i.value - stamp['end_timestamp_op'] = i.op - elif i.op in ('gt', 'ge'): - stamp['start_timestamp'] = i.value - stamp['start_timestamp_op'] = i.op - else: - if i.op == 'eq': - if i.field == 'search_offset': - stamp['search_offset'] = i.value - elif i.field == 'enabled': - kwargs[i.field] = i._get_value_as_type('boolean') - elif i.field.startswith('metadata.'): - metaquery[i.field] = i._get_value_as_type() - elif i.field.startswith('resource_metadata.'): - metaquery[i.field[9:]] = i._get_value_as_type() - else: - key = translation.get(i.field, i.field) - kwargs[key] = i.value - - if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]: - kwargs['metaquery'] = metaquery - if stamp: - kwargs.update(_get_query_timestamps(stamp)) - return kwargs - - -def _get_query_timestamps(args=None): - """Return any optional timestamp information in the request. - - Determine the desired range, if any, from the GET arguments. Set - up the query range using the specified offset. - - [query_start ... start_timestamp ... end_timestamp ... 
query_end] - - Returns a dictionary containing: - - start_timestamp: First timestamp to use for query - start_timestamp_op: First timestamp operator to use for query - end_timestamp: Final timestamp to use for query - end_timestamp_op: Final timestamp operator to use for query - """ - - if args is None: - return {} - search_offset = int(args.get('search_offset', 0)) - - def _parse_timestamp(timestamp): - if not timestamp: - return None - try: - iso_timestamp = timeutils.parse_isotime(timestamp) - iso_timestamp = iso_timestamp.replace(tzinfo=None) - except ValueError: - raise wsme.exc.InvalidInput('timestamp', timestamp, - 'invalid timestamp format') - return iso_timestamp - - start_timestamp = _parse_timestamp(args.get('start_timestamp')) - end_timestamp = _parse_timestamp(args.get('end_timestamp')) - start_timestamp = start_timestamp - datetime.timedelta( - minutes=search_offset) if start_timestamp else None - end_timestamp = end_timestamp + datetime.timedelta( - minutes=search_offset) if end_timestamp else None - return {'start_timestamp': start_timestamp, - 'end_timestamp': end_timestamp, - 'start_timestamp_op': args.get('start_timestamp_op'), - 'end_timestamp_op': args.get('end_timestamp_op')} - - -def flatten_metadata(metadata): - """Return flattened resource metadata. - - Metadata is returned with flattened nested structures (except nested sets) - and with all values converted to unicode strings. - """ - if metadata: - # After changing recursive_keypairs` output we need to keep - # flattening output unchanged. - # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.') - # output before: a.b:c=d - # output now: a.b.c=d - # So to keep the first variant just replace all dots except the first - return dict((k.replace('.', ':').replace(':', '.', 1), - six.text_type(v)) - for k, v in utils.recursive_keypairs(metadata, - separator='.') - if type(v) is not set) - return {} - - # TODO(fabiog): this decorator should disappear and have a more unified # way of controlling access and scope. Before messing with this, though # I feel this file should be re-factored in smaller chunks one for each diff --git a/ceilometer/api/hooks.py b/ceilometer/api/hooks.py index 003a2363..8a28587f 100644 --- a/ceilometer/api/hooks.py +++ b/ceilometer/api/hooks.py @@ -14,20 +14,11 @@ # under the License. from oslo_config import cfg -from oslo_log import log -import oslo_messaging from pecan import hooks -from ceilometer.i18n import _LE -from ceilometer import messaging from ceilometer import storage -LOG = log.getLogger(__name__) - -cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging', - group='publisher_notifier') - class ConfigHook(hooks.PecanHook): """Attach the configuration object to the request. @@ -43,45 +34,12 @@ class ConfigHook(hooks.PecanHook): class DBHook(hooks.PecanHook): def __init__(self): - self.storage_connection = DBHook.get_connection('metering') - self.event_storage_connection = DBHook.get_connection('event') - - if (not self.storage_connection - and not self.event_storage_connection): - raise Exception("Api failed to start. 
Failed to connect to " - "databases, purpose: %s" % - ', '.join(['metering', 'event'])) + self.event_storage_connection = storage.get_connection_from_config( + cfg.CONF) def before(self, state): - state.request.storage_conn = self.storage_connection state.request.event_storage_conn = self.event_storage_connection - @staticmethod - def get_connection(purpose): - try: - return storage.get_connection_from_config(cfg.CONF, purpose) - except Exception as err: - params = {"purpose": purpose, "err": err} - LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " - "retry later: %(err)s") % params) - - -class NotifierHook(hooks.PecanHook): - """Create and attach a notifier to the request. - - Usually, samples will be push to notification bus by notifier when they - are posted via /v2/meters/ API. - """ - - def __init__(self): - transport = messaging.get_transport() - self.notifier = oslo_messaging.Notifier( - transport, driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id="ceilometer.api") - - def before(self, state): - state.request.notifier = self.notifier - class TranslationHook(hooks.PecanHook): diff --git a/ceilometer/cmd/agent_notification.py b/ceilometer/cmd/agent_notification.py deleted file mode 100644 index 08b16464..00000000 --- a/ceilometer/cmd/agent_notification.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_service import service as os_service - -from ceilometer import notification -from ceilometer import service - -CONF = cfg.CONF - - -def main(): - service.prepare_service() - os_service.launch(CONF, notification.NotificationService(), - workers=CONF.notification.workers).wait() diff --git a/ceilometer/cmd/collector.py b/ceilometer/cmd/collector.py deleted file mode 100644 index 0a56a7f5..00000000 --- a/ceilometer/cmd/collector.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
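# For illustration only -- not part of this patch. After the DBHook change in
# ceilometer/api/hooks.py above, the API keeps a single event-storage
# connection and drops the per-purpose ('metering'/'event') lookup; roughly,
# the resulting hook behaves like the sketch below, where get_connection
# stands in for storage.get_connection_from_config.

class EventOnlyDBHook(object):
    def __init__(self, conf, get_connection):
        # A connection failure now propagates and aborts API startup,
        # instead of being logged per purpose and retried later.
        self.event_storage_connection = get_connection(conf)

    def before(self, state):
        state.request.event_storage_conn = self.event_storage_connection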
- -from oslo_config import cfg -from oslo_service import service as os_service - -from ceilometer import collector -from ceilometer import service - -CONF = cfg.CONF - - -def main(): - service.prepare_service() - os_service.launch(CONF, collector.CollectorService(), - workers=CONF.collector.workers).wait() diff --git a/ceilometer/cmd/polling.py b/ceilometer/cmd/polling.py deleted file mode 100644 index e4bb583f..00000000 --- a/ceilometer/cmd/polling.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Copyright 2014-2015 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service as os_service - -from ceilometer.agent import manager -from ceilometer.i18n import _LW -from ceilometer import service - -LOG = log.getLogger(__name__) - -CONF = cfg.CONF - - -class MultiChoicesOpt(cfg.Opt): - def __init__(self, name, choices=None, **kwargs): - super(MultiChoicesOpt, self).__init__( - name, type=DeduplicatedCfgList(choices), **kwargs) - self.choices = choices - - def _get_argparse_kwargs(self, group, **kwargs): - """Extends the base argparse keyword dict for multi choices options.""" - kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group) - kwargs['nargs'] = '+' - choices = kwargs.get('choices', self.choices) - if choices: - kwargs['choices'] = choices - return kwargs - - -class DeduplicatedCfgList(cfg.types.List): - def __init__(self, choices=None, **kwargs): - super(DeduplicatedCfgList, self).__init__(**kwargs) - self.choices = choices or [] - - def __call__(self, *args, **kwargs): - result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs) - result_set = set(result) - if len(result) != len(result_set): - LOG.warning(_LW("Duplicated values: %s found in CLI options, " - "auto de-duplicated"), result) - result = list(result_set) - if self.choices and not (result_set <= set(self.choices)): - raise Exception('Valid values are %s, but found %s' - % (self.choices, result)) - return result - - -CLI_OPTS = [ - MultiChoicesOpt('polling-namespaces', - default=['compute', 'central'], - choices=['compute', 'central', 'ipmi'], - dest='polling_namespaces', - help='Polling namespace(s) to be used while ' - 'resource polling'), - MultiChoicesOpt('pollster-list', - default=[], - dest='pollster_list', - help='List of pollsters (or wildcard templates) to be ' - 'used while polling'), -] - -CONF.register_cli_opts(CLI_OPTS) - - -def main(): - service.prepare_service() - os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces, - CONF.pollster_list)).wait() diff --git a/ceilometer/cmd/sample.py b/ceilometer/cmd/sample.py deleted file mode 100644 index 6157f3c9..00000000 --- a/ceilometer/cmd/sample.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2012-2014 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Command line tool for creating meter for Ceilometer. -""" -import logging -import sys - -from oslo_config import cfg -from oslo_utils import timeutils -from stevedore import extension - -from ceilometer import pipeline -from ceilometer import sample -from ceilometer import service - - -def send_sample(): - cfg.CONF.register_cli_opts([ - cfg.StrOpt('sample-name', - short='n', - help='Meter name.', - required=True), - cfg.StrOpt('sample-type', - short='y', - help='Meter type (gauge, delta, cumulative).', - default='gauge', - required=True), - cfg.StrOpt('sample-unit', - short='U', - help='Meter unit.'), - cfg.IntOpt('sample-volume', - short='l', - help='Meter volume value.', - default=1), - cfg.StrOpt('sample-resource', - short='r', - help='Meter resource id.', - required=True), - cfg.StrOpt('sample-user', - short='u', - help='Meter user id.'), - cfg.StrOpt('sample-project', - short='p', - help='Meter project id.'), - cfg.StrOpt('sample-timestamp', - short='i', - help='Meter timestamp.', - default=timeutils.utcnow().isoformat()), - cfg.StrOpt('sample-metadata', - short='m', - help='Meter metadata.'), - ]) - - service.prepare_service() - - # Set up logging to use the console - console = logging.StreamHandler(sys.stderr) - console.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(message)s') - console.setFormatter(formatter) - root_logger = logging.getLogger('') - root_logger.addHandler(console) - root_logger.setLevel(logging.DEBUG) - - pipeline_manager = pipeline.setup_pipeline( - extension.ExtensionManager('ceilometer.transformer')) - - with pipeline_manager.publisher() as p: - p([sample.Sample( - name=cfg.CONF.sample_name, - type=cfg.CONF.sample_type, - unit=cfg.CONF.sample_unit, - volume=cfg.CONF.sample_volume, - user_id=cfg.CONF.sample_user, - project_id=cfg.CONF.sample_project, - resource_id=cfg.CONF.sample_resource, - timestamp=cfg.CONF.sample_timestamp, - resource_metadata=cfg.CONF.sample_metadata and eval( - cfg.CONF.sample_metadata))]) diff --git a/ceilometer/cmd/storage.py b/ceilometer/cmd/storage.py index 977b1929..bc28f256 100644 --- a/ceilometer/cmd/storage.py +++ b/ceilometer/cmd/storage.py @@ -27,25 +27,15 @@ LOG = log.getLogger(__name__) def dbsync(): service.prepare_service() - storage.get_connection_from_config(cfg.CONF, 'metering').upgrade() - storage.get_connection_from_config(cfg.CONF, 'event').upgrade() + storage.get_connection_from_config(cfg.CONF).upgrade() def expirer(): service.prepare_service() - if cfg.CONF.database.metering_time_to_live > 0: - LOG.debug("Clearing expired metering data") - storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering') - storage_conn.clear_expired_metering_data( - cfg.CONF.database.metering_time_to_live) - else: - LOG.info(_LI("Nothing to clean, database metering time to live " - "is disabled")) - if cfg.CONF.database.event_time_to_live > 0: LOG.debug("Clearing expired event data") - event_conn = storage.get_connection_from_config(cfg.CONF, 'event') + event_conn = storage.get_connection_from_config(cfg.CONF) event_conn.clear_expired_event_data( cfg.CONF.database.event_time_to_live) else: diff --git 
a/ceilometer/collector.py b/ceilometer/collector.py deleted file mode 100644 index 4ec3bd99..00000000 --- a/ceilometer/collector.py +++ /dev/null @@ -1,184 +0,0 @@ -# -# Copyright 2012-2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from itertools import chain -import socket - -import msgpack -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import netutils -from oslo_utils import units - -from ceilometer import dispatcher -from ceilometer.i18n import _, _LE, _LW -from ceilometer import messaging -from ceilometer import service_base -from ceilometer import utils - -OPTS = [ - cfg.StrOpt('udp_address', - default='0.0.0.0', - help='Address to which the UDP socket is bound. Set to ' - 'an empty string to disable.'), - cfg.PortOpt('udp_port', - default=4952, - help='Port to which the UDP socket is bound.'), - cfg.IntOpt('batch_size', - default=1, - help='Number of notification messages to wait before ' - 'dispatching them'), - cfg.IntOpt('batch_timeout', - default=None, - help='Number of seconds to wait before dispatching samples' - 'when batch_size is not reached (None means indefinitely)'), -] - -cfg.CONF.register_opts(OPTS, group="collector") -cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging', - group='publisher_notifier') -cfg.CONF.import_opt('store_events', 'ceilometer.notification', - group='notification') - - -LOG = log.getLogger(__name__) - - -class CollectorService(service_base.ServiceBase): - """Listener for the collector service.""" - def start(self): - """Bind the UDP socket and handle incoming data.""" - # ensure dispatcher is configured before starting other services - dispatcher_managers = dispatcher.load_dispatcher_manager() - (self.meter_manager, self.event_manager) = dispatcher_managers - self.sample_listener = None - self.event_listener = None - self.udp_thread = None - super(CollectorService, self).start() - - if cfg.CONF.collector.udp_address: - self.udp_thread = utils.spawn_thread(self.start_udp) - - transport = messaging.get_transport(optional=True) - if transport: - if list(self.meter_manager): - sample_target = oslo_messaging.Target( - topic=cfg.CONF.publisher_notifier.metering_topic) - self.sample_listener = ( - messaging.get_batch_notification_listener( - transport, [sample_target], - [SampleEndpoint(self.meter_manager)], - allow_requeue=True, - batch_size=cfg.CONF.collector.batch_size, - batch_timeout=cfg.CONF.collector.batch_timeout)) - self.sample_listener.start() - - if cfg.CONF.notification.store_events and list(self.event_manager): - event_target = oslo_messaging.Target( - topic=cfg.CONF.publisher_notifier.event_topic) - self.event_listener = ( - messaging.get_batch_notification_listener( - transport, [event_target], - [EventEndpoint( - EventDispatcherVerificator(self.event_manager))], - allow_requeue=True, - batch_size=cfg.CONF.collector.batch_size, - 
batch_timeout=cfg.CONF.collector.batch_timeout)) - self.event_listener.start() - - def start_udp(self): - address_family = socket.AF_INET - if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address): - address_family = socket.AF_INET6 - udp = socket.socket(address_family, socket.SOCK_DGRAM) - udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - udp.bind((cfg.CONF.collector.udp_address, - cfg.CONF.collector.udp_port)) - - self.udp_run = True - while self.udp_run: - # NOTE(jd) Arbitrary limit of 64K because that ought to be - # enough for anybody. - data, source = udp.recvfrom(64 * units.Ki) - try: - sample = msgpack.loads(data, encoding='utf-8') - except Exception: - LOG.warning(_("UDP: Cannot decode data sent by %s"), source) - else: - try: - LOG.debug("UDP: Storing %s", sample) - self.meter_manager.map_method( - 'verify_and_record_metering_data', sample) - except Exception: - LOG.exception(_("UDP: Unable to store meter")) - - def stop(self): - if self.sample_listener: - utils.kill_listeners([self.sample_listener]) - if self.event_listener: - utils.kill_listeners([self.event_listener]) - if self.udp_thread: - self.udp_run = False - self.udp_thread.join() - super(CollectorService, self).stop() - - -class CollectorEndpoint(object): - def __init__(self, dispatcher_manager): - self.dispatcher_manager = dispatcher_manager - - def sample(self, messages): - """RPC endpoint for notification messages - - When another service sends a notification over the message - bus, this method receives it. - """ - samples = list(chain.from_iterable(m["payload"] for m in messages)) - try: - self.dispatcher_manager.map_method(self.method, samples) - except Exception: - LOG.exception(_LE("Dispatcher failed to handle the %s, " - "requeue it."), self.ep_type) - return oslo_messaging.NotificationResult.REQUEUE - - -class SampleEndpoint(CollectorEndpoint): - method = 'verify_and_record_metering_data' - ep_type = 'sample' - - -class EventDispatcherVerificator(object): - def __init__(self, dispatcher): - self.dispatcher = dispatcher - - def verify_and_record_events(self, events): - """Verify event signature and record them.""" - goods = [] - for event in events: - if utils.verify_signature( - event, self.conf.publisher.telemetry_secret): - goods.append(event) - else: - LOG.warning(_LW( - 'event signature invalid, discarding event: %s'), event) - return self.dispatcher.record_events(goods) - - -class EventEndpoint(CollectorEndpoint): - method = 'verify_and_record_events' - ep_type = 'event' diff --git a/ceilometer/compute/__init__.py b/ceilometer/compute/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/discovery.py b/ceilometer/compute/discovery.py deleted file mode 100644 index d00de2a3..00000000 --- a/ceilometer/compute/discovery.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
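# For illustration only -- not part of this patch. The deleted
# CollectorService above consumes msgpack-encoded samples from a UDP socket
# (default port 4952, 64 KiB max datagram); a minimal sender compatible with
# that receive loop might look like this. The host value is an example only.

import socket

import msgpack  # third-party library, as used by the deleted collector


def send_udp_sample(sample_dict, host='127.0.0.1', port=4952):
    """Send one sample in the msgpack-over-UDP format the collector read."""
    payload = msgpack.dumps(sample_dict)
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        udp.sendto(payload, (host, port))
    finally:
        udp.close()

# Example: send_udp_sample({'counter_name': 'cpu_util', 'counter_volume': 1.0})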
- -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.agent import plugin_base -from ceilometer import nova_client - -OPTS = [ - cfg.BoolOpt('workload_partitioning', - default=False, - help='Enable work-load partitioning, allowing multiple ' - 'compute agents to be run simultaneously.'), - cfg.IntOpt('resource_update_interval', - default=0, - min=0, - help="New instances will be discovered periodically based" - " on this option (in seconds). By default, " - "the agent discovers instances according to pipeline " - "polling interval. If option is greater than 0, " - "the instance list to poll will be updated based " - "on this option's interval. Measurements relating " - "to the instances will match intervals " - "defined in pipeline.") -] -cfg.CONF.register_opts(OPTS, group='compute') - - -class InstanceDiscovery(plugin_base.DiscoveryBase): - def __init__(self): - super(InstanceDiscovery, self).__init__() - self.nova_cli = nova_client.Client() - self.last_run = None - self.instances = {} - self.expiration_time = cfg.CONF.compute.resource_update_interval - - def discover(self, manager, param=None): - """Discover resources to monitor.""" - secs_from_last_update = 0 - if self.last_run: - secs_from_last_update = timeutils.delta_seconds( - self.last_run, timeutils.utcnow(True)) - - instances = [] - # NOTE(ityaptin) we update make a nova request only if - # it's a first discovery or resources expired - if not self.last_run or secs_from_last_update >= self.expiration_time: - try: - utc_now = timeutils.utcnow(True) - since = self.last_run.isoformat() if self.last_run else None - instances = self.nova_cli.instance_get_all_by_host( - cfg.CONF.host, since) - self.last_run = utc_now - except Exception: - # NOTE(zqfan): instance_get_all_by_host is wrapped and will log - # exception when there is any error. It is no need to raise it - # again and print one more time. - return [] - - for instance in instances: - if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', - 'error']: - self.instances.pop(instance.id, None) - else: - self.instances[instance.id] = instance - - return self.instances.values() - - @property - def group_id(self): - if cfg.CONF.compute.workload_partitioning: - return cfg.CONF.host - else: - return None diff --git a/ceilometer/compute/notifications/__init__.py b/ceilometer/compute/notifications/__init__.py deleted file mode 100644 index fac3ff28..00000000 --- a/ceilometer/compute/notifications/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -# Copyright 2013 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
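# For illustration only -- not part of this patch. The refresh decision inside
# InstanceDiscovery.discover() above, distilled into a standalone helper:
# nova is re-queried only on the first run or once resource_update_interval
# seconds have elapsed; otherwise the cached instance map is served. With the
# default interval of 0, every poll triggers a refresh.

import datetime


def needs_refresh(last_run, expiration_time, now=None):
    """Return True when the discovery cache should be rebuilt from nova."""
    if last_run is None:  # first discovery ever
        return True
    now = now or datetime.datetime.utcnow()
    return (now - last_run).total_seconds() >= expiration_time

# Example:
#     needs_refresh(None, 600)                        -> True
#     needs_refresh(datetime.datetime.utcnow(), 600)  -> False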
- -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base - - -OPTS = [ - cfg.StrOpt('nova_control_exchange', - default='nova', - help="Exchange name for Nova notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class ComputeNotificationBase(plugin_base.NotificationBase): - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.nova_control_exchange) - for topic in self.get_notification_topics(conf)] diff --git a/ceilometer/compute/notifications/instance.py b/ceilometer/compute/notifications/instance.py deleted file mode 100644 index 356fbea5..00000000 --- a/ceilometer/compute/notifications/instance.py +++ /dev/null @@ -1,89 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Converters for producing compute sample messages from notification events. -""" - -import abc - -import six - -from ceilometer.agent import plugin_base -from ceilometer.compute import notifications -from ceilometer.compute import util -from ceilometer import sample - - -@six.add_metaclass(abc.ABCMeta) -class UserMetadataAwareInstanceNotificationBase( - notifications.ComputeNotificationBase): - """Consumes notifications containing instance user metadata.""" - - def process_notification(self, message): - instance_properties = self.get_instance_properties(message) - if isinstance(instance_properties.get('metadata'), dict): - src_metadata = instance_properties['metadata'] - del instance_properties['metadata'] - util.add_reserved_user_metadata(src_metadata, instance_properties) - return self.get_sample(message) - - def get_instance_properties(self, message): - """Retrieve instance properties from notification payload.""" - return message['payload'] - - @abc.abstractmethod - def get_sample(self, message): - """Derive sample from notification payload.""" - - -class InstanceScheduled(UserMetadataAwareInstanceNotificationBase, - plugin_base.NonMetricNotificationBase): - event_types = ['scheduler.run_instance.scheduled'] - - def get_instance_properties(self, message): - """Retrieve instance properties from notification payload.""" - return message['payload']['request_spec']['instance_properties'] - - def get_sample(self, message): - yield sample.Sample.from_notification( - name='instance.scheduled', - type=sample.TYPE_DELTA, - volume=1, - unit='instance', - user_id=None, - project_id=message['payload']['request_spec'] - ['instance_properties']['project_id'], - resource_id=message['payload']['instance_id'], - message=message) - - -class ComputeInstanceNotificationBase( - UserMetadataAwareInstanceNotificationBase): - """Convert compute.instance.* notifications into Samples.""" - event_types = ['compute.instance.*'] - - -class Instance(ComputeInstanceNotificationBase, - plugin_base.NonMetricNotificationBase): - def 
get_sample(self, message): - yield sample.Sample.from_notification( - name='instance', - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - user_id=message['payload']['user_id'], - project_id=message['payload']['tenant_id'], - resource_id=message['payload']['instance_id'], - message=message) diff --git a/ceilometer/compute/pollsters/__init__.py b/ceilometer/compute/pollsters/__init__.py deleted file mode 100644 index f56122b1..00000000 --- a/ceilometer/compute/pollsters/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2014 Mirantis, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import timeutils -import six - -from ceilometer.agent import plugin_base -from ceilometer.compute.virt import inspector as virt_inspector - - -@six.add_metaclass(abc.ABCMeta) -class BaseComputePollster(plugin_base.PollsterBase): - - def setup_environment(self): - super(BaseComputePollster, self).setup_environment() - # propagate exception from check_sanity - self.inspector.check_sanity() - - @property - def inspector(self): - try: - inspector = self._inspector - except AttributeError: - inspector = virt_inspector.get_hypervisor_inspector() - BaseComputePollster._inspector = inspector - return inspector - - @property - def default_discovery(self): - return 'local_instances' - - @staticmethod - def _populate_cache_create(_i_cache, _instance, _inspector, - _DiskData, _inspector_attr, _stats_attr): - """Settings and return cache.""" - if _instance.id not in _i_cache: - _data = 0 - _per_device_data = {} - disk_rates = getattr(_inspector, _inspector_attr)(_instance) - for disk, stats in disk_rates: - _data += getattr(stats, _stats_attr) - _per_device_data[disk.device] = ( - getattr(stats, _stats_attr)) - _per_disk_data = { - _stats_attr: _per_device_data - } - _i_cache[_instance.id] = _DiskData( - _data, - _per_disk_data - ) - return _i_cache[_instance.id] - - def _record_poll_time(self): - """Method records current time as the poll time. - - :return: time in seconds since the last poll time was recorded - """ - current_time = timeutils.utcnow() - duration = None - if hasattr(self, '_last_poll_time'): - duration = timeutils.delta_seconds(self._last_poll_time, - current_time) - self._last_poll_time = current_time - return duration diff --git a/ceilometer/compute/pollsters/cpu.py b/ceilometer/compute/pollsters/cpu.py deleted file mode 100644 index d8ee3671..00000000 --- a/ceilometer/compute/pollsters/cpu.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class CPUPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - for instance in resources: - LOG.debug('checking instance %s', instance.id) - try: - cpu_info = self.inspector.inspect_cpus(instance) - LOG.debug("CPUTIME USAGE: %(instance)s %(time)d", - {'instance': instance, - 'time': cpu_info.time}) - cpu_num = {'cpu_number': cpu_info.number} - yield util.make_sample_from_instance( - instance, - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume=cpu_info.time, - additional_metadata=cpu_num, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining CPU time is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('could not get CPU time for %(id)s: %(e)s'), - {'id': instance.id, 'e': err}) - - -class CPUUtilPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking CPU util for instance %s', instance.id) - try: - cpu_info = self.inspector.inspect_cpu_util( - instance, self._inspection_duration) - LOG.debug("CPU UTIL: %(instance)s %(util)d", - {'instance': instance, - 'util': cpu_info.util}) - yield util.make_sample_from_instance( - instance, - name='cpu_util', - type=sample.TYPE_GAUGE, - unit='%', - volume=cpu_info.util, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining CPU Util is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'), - {'id': instance.id, 'e': err}) diff --git a/ceilometer/compute/pollsters/disk.py b/ceilometer/compute/pollsters/disk.py deleted file mode 100644 index d277cbca..00000000 --- a/ceilometer/compute/pollsters/disk.py +++ /dev/null @@ -1,694 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# Copyright 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -import abc -import collections - -from oslo_log import log -import six - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -DiskIOData = collections.namedtuple( - 'DiskIOData', - 'r_bytes r_requests w_bytes w_requests per_disk_requests', -) - -DiskRateData = collections.namedtuple('DiskRateData', - ['read_bytes_rate', - 'read_requests_rate', - 'write_bytes_rate', - 'write_requests_rate', - 'per_disk_rate']) - -DiskLatencyData = collections.namedtuple('DiskLatencyData', - ['disk_latency', - 'per_disk_latency']) - -DiskIOPSData = collections.namedtuple('DiskIOPSData', - ['iops_count', - 'per_disk_iops']) - -DiskInfoData = collections.namedtuple('DiskInfoData', - ['capacity', - 'allocation', - 'physical', - 'per_disk_info']) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(pollsters.BaseComputePollster): - - DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:", - "%s %s:", - "read-requests=%d", - "read-bytes=%d", - "write-requests=%d", - "write-bytes=%d", - "errors=%d", - ]) - - CACHE_KEY_DISK = 'diskio' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK, {}) - if instance.id not in i_cache: - r_bytes = 0 - r_requests = 0 - w_bytes = 0 - w_requests = 0 - per_device_read_bytes = {} - per_device_read_requests = {} - per_device_write_bytes = {} - per_device_write_requests = {} - for disk, info in inspector.inspect_disks(instance): - LOG.debug(self.DISKIO_USAGE_MESSAGE, - instance, disk.device, info.read_requests, - info.read_bytes, info.write_requests, - info.write_bytes, info.errors) - r_bytes += info.read_bytes - r_requests += info.read_requests - w_bytes += info.write_bytes - w_requests += info.write_requests - # per disk data - per_device_read_bytes[disk.device] = info.read_bytes - per_device_read_requests[disk.device] = info.read_requests - per_device_write_bytes[disk.device] = info.write_bytes - per_device_write_requests[disk.device] = info.write_requests - per_device_requests = { - 'read_bytes': per_device_read_bytes, - 'read_requests': per_device_read_requests, - 'write_bytes': per_device_write_bytes, - 'write_requests': per_device_write_requests, - } - i_cache[instance.id] = DiskIOData( - r_bytes=r_bytes, - r_requests=r_requests, - w_bytes=w_bytes, - w_requests=w_requests, - per_disk_requests=per_device_requests, - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(instance, c_data): - """Return one or more Sample.""" - - @staticmethod - def _get_sample_read_and_write(instance, _name, _unit, c_data, - _volume, _metadata): - """Read / write Pollster and return one Sample""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_CUMULATIVE, - unit=_unit, - volume=getattr(c_data, _volume), - additional_metadata={ - 'device': c_data.per_disk_requests[_metadata].keys()}, - )] - - @staticmethod - def _get_samples_per_device(c_data, _attr, instance, _name, _unit): - """Return one or more Samples for meter 'disk.device.*'""" - samples = [] - for disk, value in six.iteritems(c_data.per_disk_requests[_attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_CUMULATIVE, - unit=_unit, - volume=value, - resource_id="%s-%s" % 
(instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def get_samples(self, manager, cache, resources): - for instance in resources: - instance_name = util.instance_name(instance) - try: - c_data = self._populate_cache( - self.inspector, - cache, - instance, - ) - for s in self._get_samples(instance, c_data): - yield s - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class ReadRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.read.requests', 'request', c_data, - 'r_requests', 'read_requests') - - -class PerDeviceReadRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'read_requests', instance, - 'disk.device.read.requests', 'request') - - -class ReadBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.read.bytes', 'B', c_data, - 'r_bytes', 'read_bytes') - - -class PerDeviceReadBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'read_bytes', instance, - 'disk.device.read.bytes', 'B') - - -class WriteRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.write.requests', 'request', - c_data, 'w_requests', 'write_requests') - - -class PerDeviceWriteRequestsPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'write_requests', instance, - 'disk.device.write.requests', 'request') - - -class WriteBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_sample_read_and_write( - instance, 'disk.write.bytes', 'B', - c_data, 'w_bytes', 'write_bytes') - - -class PerDeviceWriteBytesPollster(_Base): - - def _get_samples(self, instance, c_data): - return self._get_samples_per_device( - c_data, 'write_bytes', instance, - 'disk.device.write.bytes', 'B') - - -@six.add_metaclass(abc.ABCMeta) -class _DiskRatesPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_RATE = 'diskio-rate' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {}) - if instance.id not in i_cache: - r_bytes_rate = 0 - r_requests_rate = 0 - w_bytes_rate = 0 - w_requests_rate = 0 - per_disk_r_bytes_rate = {} - per_disk_r_requests_rate = {} - per_disk_w_bytes_rate = {} - per_disk_w_requests_rate = {} - disk_rates = inspector.inspect_disk_rates( - instance, self._inspection_duration) - for disk, info in disk_rates: - r_bytes_rate += info.read_bytes_rate - r_requests_rate += info.read_requests_rate - w_bytes_rate += info.write_bytes_rate - 
w_requests_rate += info.write_requests_rate - - per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate - per_disk_r_requests_rate[disk.device] = info.read_requests_rate - per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate - per_disk_w_requests_rate[disk.device] = ( - info.write_requests_rate) - per_disk_rate = { - 'read_bytes_rate': per_disk_r_bytes_rate, - 'read_requests_rate': per_disk_r_requests_rate, - 'write_bytes_rate': per_disk_w_bytes_rate, - 'write_requests_rate': per_disk_w_requests_rate, - } - i_cache[instance.id] = DiskRateData( - r_bytes_rate, - r_requests_rate, - w_bytes_rate, - w_requests_rate, - per_disk_rate - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - try: - disk_rates_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_rate in self._get_samples(instance, disk_rates_info): - yield disk_rate - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - def _get_samples_per_device(self, disk_rates_info, _attr, instance, - _name, _unit): - """Return one or more Samples for meter 'disk.device.*'.""" - samples = [] - for disk, value in six.iteritems(disk_rates_info.per_disk_rate[ - _attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit=_unit, - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def _get_sample_read_and_write(self, instance, _name, _unit, _element, - _attr1, _attr2): - """Read / write Pollster and return one Sample""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit=_unit, - volume=getattr(_element, _attr1), - additional_metadata={ - 'device': getattr(_element, _attr2)[_attr1].keys()}, - )] - - -class ReadBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.read.bytes.rate', 'B/s', disk_rates_info, - 'read_bytes_rate', 'per_disk_rate') - - -class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'read_bytes_rate', instance, - 'disk.device.read.bytes.rate', 'B/s') - - -class ReadRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.read.requests.rate', 'requests/s', disk_rates_info, - 'read_requests_rate', 'per_disk_rate') - - -class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'read_requests_rate', 
instance, - 'disk.device.read.requests.rate', 'requests/s') - - -class WriteBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.write.bytes.rate', 'B/s', disk_rates_info, - 'write_bytes_rate', 'per_disk_rate') - - -class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'write_bytes_rate', instance, - 'disk.device.write.bytes.rate', 'B/s') - - -class WriteRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_sample_read_and_write( - instance, 'disk.write.requests.rate', 'requests/s', - disk_rates_info, 'write_requests_rate', 'per_disk_rate') - - -class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase): - - def _get_samples(self, instance, disk_rates_info): - return self._get_samples_per_device( - disk_rates_info, 'write_requests_rate', instance, - 'disk.device.write.requests.rate', 'requests/s') - - -@six.add_metaclass(abc.ABCMeta) -class _DiskLatencyPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_LATENCY = 'disk-latency' - - def _populate_cache(self, inspector, cache, instance): - return self._populate_cache_create( - cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {}), - instance, inspector, DiskLatencyData, - 'inspect_disk_latency', 'disk_latency') - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_latency_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_latency in self._get_samples(instance, - disk_latency_info): - yield disk_latency - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. 
- LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class DiskLatencyPollster(_DiskLatencyPollsterBase): - - def _get_samples(self, instance, disk_latency_info): - return [util.make_sample_from_instance( - instance, - name='disk.latency', - type=sample.TYPE_GAUGE, - unit='ms', - volume=disk_latency_info.disk_latency / 1000 - )] - - -class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase): - - def _get_samples(self, instance, disk_latency_info): - samples = [] - for disk, value in six.iteritems(disk_latency_info.per_disk_latency[ - 'disk_latency']): - samples.append(util.make_sample_from_instance( - instance, - name='disk.device.latency', - type=sample.TYPE_GAUGE, - unit='ms', - volume=value / 1000, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - -class _DiskIOPSPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_IOPS = 'disk-iops' - - def _populate_cache(self, inspector, cache, instance): - return self._populate_cache_create( - cache.setdefault(self.CACHE_KEY_DISK_IOPS, {}), - instance, inspector, DiskIOPSData, - 'inspect_disk_iops', 'iops_count') - - @abc.abstractmethod - def _get_samples(self, instance, disk_rates_info): - """Return one or more Sample.""" - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_iops_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_iops in self._get_samples(instance, - disk_iops_info): - yield disk_iops - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. 
- LOG.debug('%(inspector)s does not provide data for ' - '%(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class DiskIOPSPollster(_DiskIOPSPollsterBase): - - def _get_samples(self, instance, disk_iops_info): - return [util.make_sample_from_instance( - instance, - name='disk.iops', - type=sample.TYPE_GAUGE, - unit='count/s', - volume=disk_iops_info.iops_count - )] - - -class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase): - - def _get_samples(self, instance, disk_iops_info): - samples = [] - for disk, value in six.iteritems(disk_iops_info.per_disk_iops[ - 'iops_count']): - samples.append(util.make_sample_from_instance( - instance, - name='disk.device.iops', - type=sample.TYPE_GAUGE, - unit='count/s', - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - -@six.add_metaclass(abc.ABCMeta) -class _DiskInfoPollsterBase(pollsters.BaseComputePollster): - - CACHE_KEY_DISK_INFO = 'diskinfo' - - def _populate_cache(self, inspector, cache, instance): - i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {}) - if instance.id not in i_cache: - all_capacity = 0 - all_allocation = 0 - all_physical = 0 - per_disk_capacity = {} - per_disk_allocation = {} - per_disk_physical = {} - disk_info = inspector.inspect_disk_info( - instance) - for disk, info in disk_info: - all_capacity += info.capacity - all_allocation += info.allocation - all_physical += info.physical - - per_disk_capacity[disk.device] = info.capacity - per_disk_allocation[disk.device] = info.allocation - per_disk_physical[disk.device] = info.physical - per_disk_info = { - 'capacity': per_disk_capacity, - 'allocation': per_disk_allocation, - 'physical': per_disk_physical, - } - i_cache[instance.id] = DiskInfoData( - all_capacity, - all_allocation, - all_physical, - per_disk_info - ) - return i_cache[instance.id] - - @abc.abstractmethod - def _get_samples(self, instance, disk_info): - """Return one or more Sample.""" - - def _get_samples_per_device(self, disk_info, _attr, instance, _name): - """Return one or more Samples for meter 'disk.device.*'.""" - samples = [] - for disk, value in six.iteritems(disk_info.per_disk_info[_attr]): - samples.append(util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit='B', - volume=value, - resource_id="%s-%s" % (instance.id, disk), - additional_metadata={'disk_name': disk}, - )) - return samples - - def _get_samples_task(self, instance, _name, disk_info, _attr1, _attr2): - """Return one or more Samples for meter 'disk.task.*'.""" - return [util.make_sample_from_instance( - instance, - name=_name, - type=sample.TYPE_GAUGE, - unit='B', - volume=getattr(disk_info, _attr1), - additional_metadata={ - 'device': disk_info.per_disk_info[_attr2].keys()}, - )] - - def get_samples(self, manager, cache, resources): - for instance in resources: - try: - disk_size_info = self._populate_cache( - self.inspector, - cache, - instance, - ) - for disk_info in self._get_samples(instance, disk_size_info): - yield disk_info - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. 
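For reference, the _populate_cache methods removed above all follow the same per-cycle caching idiom: the hypervisor is queried at most once per instance, and the result is memoized in a dict shared by every pollster in the polling cycle. A minimal self-contained sketch of that idiom (the namedtuple shape mirrors DiskInfoData above; populate_cache and fake_inspector are illustrative stand-ins, not ceilometer APIs)::

    import collections

    DiskInfoData = collections.namedtuple(
        'DiskInfoData', ['capacity', 'allocation', 'physical', 'per_disk_info'])

    def populate_cache(inspect_disk_info, cache, instance_id):
        # All disk-info pollsters share one cache dict per polling cycle,
        # so the hypervisor is queried at most once per instance.
        i_cache = cache.setdefault('diskinfo', {})
        if instance_id not in i_cache:
            capacity = allocation = physical = 0
            per_disk_capacity = {}
            for device, info in inspect_disk_info(instance_id):
                capacity += info['capacity']
                allocation += info['allocation']
                physical += info['physical']
                per_disk_capacity[device] = info['capacity']
            i_cache[instance_id] = DiskInfoData(
                capacity, allocation, physical,
                {'capacity': per_disk_capacity})
        return i_cache[instance_id]

    def fake_inspector(instance_id):
        # Stand-in inspector returning static numbers for two virtual disks.
        yield 'vda', {'capacity': 10, 'allocation': 6, 'physical': 5}
        yield 'vdb', {'capacity': 20, 'allocation': 1, 'physical': 1}

    cache = {}
    first = populate_cache(fake_inspector, cache, 'uuid-1')
    second = populate_cache(fake_inspector, cache, 'uuid-1')
    assert first is second and first.capacity == 30  # second call hits the cache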
- LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - instance_name = util.instance_name(instance) - LOG.exception(_('Ignoring instance %(name)s ' - '(%(instance_id)s) : %(error)s') % ( - {'name': instance_name, - 'instance_id': instance.id, - 'error': err})) - - -class CapacityPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.capacity', disk_info, - 'capacity', 'capacity') - - -class PerDeviceCapacityPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'capacity', instance, 'disk.device.capacity') - - -class AllocationPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.allocation', disk_info, - 'allocation', 'allocation') - - -class PerDeviceAllocationPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'allocation', instance, 'disk.device.allocation') - - -class PhysicalPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_task( - instance, 'disk.usage', disk_info, - 'physical', 'physical') - - -class PerDevicePhysicalPollster(_DiskInfoPollsterBase): - - def _get_samples(self, instance, disk_info): - return self._get_samples_per_device( - disk_info, 'physical', instance, 'disk.device.usage') diff --git a/ceilometer/compute/pollsters/instance.py b/ceilometer/compute/pollsters/instance.py deleted file mode 100644 index cc9e7eb7..00000000 --- a/ceilometer/compute/pollsters/instance.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer import sample - - -class InstancePollster(pollsters.BaseComputePollster): - - @staticmethod - def get_samples(manager, cache, resources): - for instance in resources: - yield util.make_sample_from_instance( - instance, - name='instance', - type=sample.TYPE_GAUGE, - unit='instance', - volume=1, - ) diff --git a/ceilometer/compute/pollsters/memory.py b/ceilometer/compute/pollsters/memory.py deleted file mode 100644 index 9f126336..00000000 --- a/ceilometer/compute/pollsters/memory.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. 
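The InstancePollster just removed is the smallest possible illustration of the pollster contract: get_samples() is a generator yielding one record per resource. A reduced sketch of that contract, with a plain namedtuple standing in for ceilometer's Sample class::

    import collections

    Sample = collections.namedtuple(
        'Sample', ['name', 'type', 'unit', 'volume', 'resource_id'])

    def get_samples(resources):
        # One gauge per live instance; the sample's existence, not its
        # value, is what meters instance uptime.
        for instance_id in resources:
            yield Sample(name='instance', type='gauge', unit='instance',
                         volume=1, resource_id=instance_id)

    for s in get_samples(['uuid-1', 'uuid-2']):
        print(s.name, s.resource_id, s.volume)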
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _, _LE, _LW -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class MemoryUsagePollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking memory usage for instance %s', instance.id) - try: - memory_info = self.inspector.inspect_memory_usage( - instance, self._inspection_duration) - LOG.debug("MEMORY USAGE: %(instance)s %(usage)f", - {'instance': instance, - 'usage': memory_info.usage}) - yield util.make_sample_from_instance( - instance, - name='memory.usage', - type=sample.TYPE_GAUGE, - unit='MB', - volume=memory_info.usage, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except virt_inspector.NoDataException as e: - LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' - '%(instance_id)s, non-fatal reason: %(exc)s'), - {'pollster': self.__class__.__name__, - 'instance_id': instance.id, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining Memory Usage is not implemented for %s', - self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_('Could not get Memory Usage for ' - '%(id)s: %(e)s'), {'id': instance.id, - 'e': err}) - - -class MemoryResidentPollster(pollsters.BaseComputePollster): - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - LOG.debug('Checking resident memory for instance %s', - instance.id) - try: - memory_info = self.inspector.inspect_memory_resident( - instance, self._inspection_duration) - LOG.debug("RESIDENT MEMORY: %(instance)s %(resident)f", - {'instance': instance, - 'resident': memory_info.resident}) - yield util.make_sample_from_instance( - instance, - name='memory.resident', - type=sample.TYPE_GAUGE, - unit='MB', - volume=memory_info.resident, - ) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. 
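Every get_samples implementation in these pollsters repeats the same defensive ladder: expected races are downgraded to debug logs, capability gaps are skipped, and only unexpected failures are logged loudly, while polling always continues with the next instance. A generic sketch of the pattern, in which the exception classes and the builtin NotImplementedError stand in for the inspector hierarchy and ceilometer.NotImplementedError::

    import logging

    LOG = logging.getLogger(__name__)

    class InstanceNotFoundException(Exception):
        pass

    class InstanceShutOffException(Exception):
        pass

    def poll_one(inspect, instance_id):
        try:
            return list(inspect(instance_id))
        except InstanceNotFoundException as err:
            # Expected race: instance deleted while being polled.
            LOG.debug('Exception while getting samples %s', err)
        except InstanceShutOffException as err:
            # Expected state: nothing to measure on a stopped domain.
            LOG.debug('Instance %s was shut off: %s', instance_id, err)
        except NotImplementedError:
            # Capability gap: the configured inspector lacks this metric.
            LOG.debug('Inspector does not provide this data')
        except Exception:
            # Unexpected failure: log it loudly but keep polling others.
            LOG.exception('Ignoring instance %s', instance_id)
        return []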
- LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except virt_inspector.NoDataException as e: - LOG.warning(_LW('Cannot inspect data of %(pollster)s for ' - '%(instance_id)s, non-fatal reason: %(exc)s'), - {'pollster': self.__class__.__name__, - 'instance_id': instance.id, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('Obtaining Resident Memory is not implemented' - ' for %s', self.inspector.__class__.__name__) - except Exception as err: - LOG.exception(_LE('Could not get Resident Memory Usage for ' - '%(id)s: %(e)s'), {'id': instance.id, - 'e': err}) diff --git a/ceilometer/compute/pollsters/net.py b/ceilometer/compute/pollsters/net.py deleted file mode 100644 index e3131f99..00000000 --- a/ceilometer/compute/pollsters/net.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy - -from oslo_log import log - -import ceilometer -from ceilometer.compute import pollsters -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class _Base(pollsters.BaseComputePollster): - - NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d", - "write-bytes=%d"]) - - @staticmethod - def make_vnic_sample(instance, name, type, unit, volume, vnic_data): - metadata = copy.copy(vnic_data) - additional_metadata = dict(zip(metadata._fields, metadata)) - if vnic_data.fref is not None: - rid = vnic_data.fref - additional_metadata['vnic_name'] = vnic_data.fref - else: - instance_name = util.instance_name(instance) - rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name) - additional_metadata['vnic_name'] = vnic_data.name - - return util.make_sample_from_instance( - instance=instance, - name=name, - type=type, - unit=unit, - volume=volume, - resource_id=rid, - additional_metadata=additional_metadata - ) - - CACHE_KEY_VNIC = 'vnics' - - def _get_vnic_info(self, inspector, instance): - return inspector.inspect_vnics(instance) - - @staticmethod - def _get_rx_info(info): - return info.rx_bytes - - @staticmethod - def _get_tx_info(info): - return info.tx_bytes - - def _get_vnics_for_instance(self, cache, inspector, instance): - i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {}) - if instance.id not in i_cache: - i_cache[instance.id] = list( - self._get_vnic_info(inspector, instance) - ) - return i_cache[instance.id] - - def get_samples(self, manager, cache, resources): - self._inspection_duration = self._record_poll_time() - for instance in resources: - instance_name = util.instance_name(instance) - LOG.debug('checking net info 
for instance %s', instance.id) - try: - vnics = self._get_vnics_for_instance( - cache, - self.inspector, - instance, - ) - for vnic, info in vnics: - LOG.debug(self.NET_USAGE_MESSAGE, instance_name, - vnic.name, self._get_rx_info(info), - self._get_tx_info(info)) - yield self._get_sample(instance, vnic, info) - except virt_inspector.InstanceNotFoundException as err: - # Instance was deleted while getting samples. Ignore it. - LOG.debug('Exception while getting samples %s', err) - except virt_inspector.InstanceShutOffException as e: - LOG.debug('Instance %(instance_id)s was shut off while ' - 'getting samples of %(pollster)s: %(exc)s', - {'instance_id': instance.id, - 'pollster': self.__class__.__name__, 'exc': e}) - except ceilometer.NotImplementedError: - # Selected inspector does not implement this pollster. - LOG.debug('%(inspector)s does not provide data for ' - ' %(pollster)s', - {'inspector': self.inspector.__class__.__name__, - 'pollster': self.__class__.__name__}) - except Exception as err: - LOG.exception(_('Ignoring instance %(name)s: %(error)s'), - {'name': instance_name, 'error': err}) - - -class _RateBase(_Base): - - NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:", - "read-bytes-rate=%d", - "write-bytes-rate=%d"]) - - CACHE_KEY_VNIC = 'vnic-rates' - - def _get_vnic_info(self, inspector, instance): - return inspector.inspect_vnic_rates(instance, - self._inspection_duration) - - @staticmethod - def _get_rx_info(info): - return info.rx_bytes_rate - - @staticmethod - def _get_tx_info(info): - return info.tx_bytes_rate - - -class IncomingBytesPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=info.rx_bytes, - vnic_data=vnic, - ) - - -class IncomingPacketsPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.packets', - type=sample.TYPE_CUMULATIVE, - unit='packet', - volume=info.rx_packets, - vnic_data=vnic, - ) - - -class OutgoingBytesPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=info.tx_bytes, - vnic_data=vnic, - ) - - -class OutgoingPacketsPollster(_Base): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.packets', - type=sample.TYPE_CUMULATIVE, - unit='packet', - volume=info.tx_packets, - vnic_data=vnic, - ) - - -class IncomingBytesRatePollster(_RateBase): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.incoming.bytes.rate', - type=sample.TYPE_GAUGE, - unit='B/s', - volume=info.rx_bytes_rate, - vnic_data=vnic, - ) - - -class OutgoingBytesRatePollster(_RateBase): - - def _get_sample(self, instance, vnic, info): - return self.make_vnic_sample( - instance, - name='network.outgoing.bytes.rate', - type=sample.TYPE_GAUGE, - unit='B/s', - volume=info.tx_bytes_rate, - vnic_data=vnic, - ) diff --git a/ceilometer/compute/pollsters/util.py b/ceilometer/compute/pollsters/util.py deleted file mode 100644 index 1fd4f95f..00000000 --- a/ceilometer/compute/pollsters/util.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the 
License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.compute import util as compute_util -from ceilometer import sample - - -INSTANCE_PROPERTIES = [ - # Identity properties - 'reservation_id', - # Type properties - 'architecture', - 'OS-EXT-AZ:availability_zone', - 'kernel_id', - 'os_type', - 'ramdisk_id', -] - - -def _get_metadata_from_object(instance): - """Return a metadata dictionary for the instance.""" - instance_type = instance.flavor['name'] if instance.flavor else None - metadata = { - 'display_name': instance.name, - 'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''), - 'instance_id': instance.id, - 'instance_type': instance_type, - 'host': instance.hostId, - 'instance_host': getattr(instance, 'OS-EXT-SRV-ATTR:host', u''), - 'flavor': instance.flavor, - 'status': instance.status.lower(), - 'state': getattr(instance, 'OS-EXT-STS:vm_state', u''), - } - - # Image properties - if instance.image: - metadata['image'] = instance.image - metadata['image_ref'] = instance.image['id'] - # Images that come through the conductor API in the nova notifier - # plugin will not have links. - if instance.image.get('links'): - metadata['image_ref_url'] = instance.image['links'][0]['href'] - else: - metadata['image_ref_url'] = None - else: - metadata['image'] = None - metadata['image_ref'] = None - metadata['image_ref_url'] = None - - for name in INSTANCE_PROPERTIES: - if hasattr(instance, name): - metadata[name] = getattr(instance, name) - - metadata['vcpus'] = instance.flavor['vcpus'] - metadata['memory_mb'] = instance.flavor['ram'] - metadata['disk_gb'] = instance.flavor['disk'] - metadata['ephemeral_gb'] = instance.flavor['ephemeral'] - metadata['root_gb'] = (int(metadata['disk_gb']) - - int(metadata['ephemeral_gb'])) - - return compute_util.add_reserved_user_metadata(instance.metadata, metadata) - - -def make_sample_from_instance(instance, name, type, unit, volume, - resource_id=None, additional_metadata=None): - additional_metadata = additional_metadata or {} - resource_metadata = _get_metadata_from_object(instance) - resource_metadata.update(additional_metadata) - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=instance.user_id, - project_id=instance.tenant_id, - resource_id=resource_id or instance.id, - resource_metadata=resource_metadata, - ) - - -def instance_name(instance): - """Shortcut to get instance name.""" - return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None) diff --git a/ceilometer/compute/util.py b/ceilometer/compute/util.py deleted file mode 100644 index 6c253839..00000000 --- a/ceilometer/compute/util.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
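Note that _get_metadata_from_object above derives the root disk size rather than reading it: the flavor reports total and ephemeral gigabytes, and the remainder is attributed to the root disk. A worked example, with a plain dict standing in for the nova flavor::

    flavor = {'name': 'm1.small', 'vcpus': 2, 'ram': 2048,
              'disk': 20, 'ephemeral': 5}

    metadata = {
        'instance_type': flavor['name'],
        'vcpus': flavor['vcpus'],
        'memory_mb': flavor['ram'],
        'disk_gb': flavor['disk'],
        'ephemeral_gb': flavor['ephemeral'],
    }
    # Root disk = total disk minus the ephemeral allocation.
    metadata['root_gb'] = (int(metadata['disk_gb']) -
                           int(metadata['ephemeral_gb']))
    assert metadata['root_gb'] == 15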
See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import six - - -# Below config is for collecting metadata which user defined in nova or else, -# and then storing it to Sample for future use according to user's requirement. -# Such as using it as OpenTSDB tags for metrics. -OPTS = [ - cfg.ListOpt('reserved_metadata_namespace', - default=['metering.'], - help='List of metadata prefixes reserved for metering use.'), - cfg.IntOpt('reserved_metadata_length', - default=256, - help='Limit on length of reserved metadata values.'), - cfg.ListOpt('reserved_metadata_keys', - default=[], - help='List of metadata keys reserved for metering use. And ' - 'these keys are additional to the ones included in the ' - 'namespace.'), -] - -cfg.CONF.register_opts(OPTS) - - -def add_reserved_user_metadata(src_metadata, dest_metadata): - limit = cfg.CONF.reserved_metadata_length - user_metadata = {} - for prefix in cfg.CONF.reserved_metadata_namespace: - md = dict( - (k[len(prefix):].replace('.', '_'), - v[:limit] if isinstance(v, six.string_types) else v) - for k, v in src_metadata.items() - if (k.startswith(prefix) and - k[len(prefix):].replace('.', '_') not in dest_metadata) - ) - user_metadata.update(md) - - for metadata_key in cfg.CONF.reserved_metadata_keys: - md = dict( - (k.replace('.', '_'), - v[:limit] if isinstance(v, six.string_types) else v) - for k, v in src_metadata.items() - if (k == metadata_key and - k.replace('.', '_') not in dest_metadata) - ) - user_metadata.update(md) - - if user_metadata: - dest_metadata['user_metadata'] = user_metadata - - return dest_metadata diff --git a/ceilometer/compute/virt/__init__.py b/ceilometer/compute/virt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/hyperv/__init__.py b/ceilometer/compute/virt/hyperv/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/hyperv/inspector.py b/ceilometer/compute/virt/hyperv/inspector.py deleted file mode 100644 index 38409295..00000000 --- a/ceilometer/compute/virt/hyperv/inspector.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Implementation of Inspector abstraction for Hyper-V""" - -import collections -import functools -import sys - -from os_win import exceptions as os_win_exc -from os_win import utilsfactory -from oslo_utils import units -import six - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector - - -def convert_exceptions(function, exception_map): - expected_exceptions = tuple(exception_map.keys()) - - @functools.wraps(function) - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except expected_exceptions as ex: - # exception might be a subclass of an expected exception. 
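The add_reserved_user_metadata helper above is easiest to follow as a key filter. A simplified sketch of its prefix branch only (the real helper also honors reserved_metadata_keys, skips keys already in the destination, and merges the result under 'user_metadata'; pick_reserved here is an illustrative name)::

    def pick_reserved(src_metadata, prefixes=('metering.',), limit=256):
        # Keep keys under a reserved namespace, normalise '.' to '_',
        # and truncate long string values to the configured limit.
        reserved = {}
        for prefix in prefixes:
            for key, value in src_metadata.items():
                if key.startswith(prefix):
                    new_key = key[len(prefix):].replace('.', '_')
                    if isinstance(value, str):
                        value = value[:limit]
                    reserved[new_key] = value
        return reserved

    meta = pick_reserved({'metering.stack.id': 'abc123', 'color': 'blue'})
    assert meta == {'stack_id': 'abc123'}  # unreserved keys are dropped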
- for expected in expected_exceptions: - if isinstance(ex, expected): - raised_exception = exception_map[expected] - break - - exc_info = sys.exc_info() - # NOTE(claudiub): Python 3 raises the exception object given as - # the second argument in six.reraise. - # The original message will be maintained by passing the original - # exception. - exc = raised_exception(six.text_type(exc_info[1])) - six.reraise(raised_exception, exc, exc_info[2]) - return wrapper - - -def decorate_all_methods(decorator, *args, **kwargs): - def decorate(cls): - for attr in cls.__dict__: - class_member = getattr(cls, attr) - if callable(class_member): - setattr(cls, attr, decorator(class_member, *args, **kwargs)) - return cls - - return decorate - - -exception_conversion_map = collections.OrderedDict([ - # NOTE(claudiub): order should be from the most specialized exception type - # to the most generic exception type. - # (expected_exception, converted_exception) - (os_win_exc.NotFound, virt_inspector.InstanceNotFoundException), - (os_win_exc.OSWinException, virt_inspector.InspectorException), -]) - -# NOTE(claudiub): the purpose of the decorator below is to prevent any -# os_win exceptions (subclasses of OSWinException) to leak outside of the -# HyperVInspector. - - -@decorate_all_methods(convert_exceptions, exception_conversion_map) -class HyperVInspector(virt_inspector.Inspector): - - def __init__(self): - super(HyperVInspector, self).__init__() - self._utils = utilsfactory.get_metricsutils() - self._host_max_cpu_clock = self._compute_host_max_cpu_clock() - - def _compute_host_max_cpu_clock(self): - hostutils = utilsfactory.get_hostutils() - # host's number of CPUs and CPU clock speed will not change. - cpu_info = hostutils.get_cpus_info() - host_cpu_count = len(cpu_info) - host_cpu_clock = cpu_info[0]['MaxClockSpeed'] - - return float(host_cpu_clock * host_cpu_count) - - def inspect_cpus(self, instance): - instance_name = util.instance_name(instance) - (cpu_clock_used, - cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name) - - cpu_percent_used = cpu_clock_used / self._host_max_cpu_clock - - # Nanoseconds - cpu_time = (int(uptime * cpu_percent_used) * units.k) - - return virt_inspector.CPUStats(number=cpu_count, time=cpu_time) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - usage = self._utils.get_memory_metrics(instance_name) - return virt_inspector.MemoryUsageStats(usage=usage) - - def inspect_vnics(self, instance): - instance_name = util.instance_name(instance) - for vnic_metrics in self._utils.get_vnic_metrics(instance_name): - interface = virt_inspector.Interface( - name=vnic_metrics["element_name"], - mac=vnic_metrics["address"], - fref=None, - parameters=None) - - stats = virt_inspector.InterfaceStats( - rx_bytes=vnic_metrics['rx_mb'] * units.Mi, - rx_packets=0, - tx_bytes=vnic_metrics['tx_mb'] * units.Mi, - tx_packets=0) - - yield (interface, stats) - - def inspect_disks(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in self._utils.get_disk_metrics(instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskStats( - read_requests=0, - # Return bytes - read_bytes=disk_metrics['read_mb'] * units.Mi, - write_requests=0, - write_bytes=disk_metrics['write_mb'] * units.Mi, - errors=0) - - yield (disk, stats) - - def inspect_disk_latency(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in 
self._utils.get_disk_latency_metrics( - instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskLatencyStats( - disk_latency=disk_metrics['disk_latency']) - - yield (disk, stats) - - def inspect_disk_iops(self, instance): - instance_name = util.instance_name(instance) - for disk_metrics in self._utils.get_disk_iops_count(instance_name): - disk = virt_inspector.Disk(device=disk_metrics['instance_id']) - stats = virt_inspector.DiskIOPSStats( - iops_count=disk_metrics['iops_count']) - - yield (disk, stats) diff --git a/ceilometer/compute/virt/inspector.py b/ceilometer/compute/virt/inspector.py deleted file mode 100644 index 484bf82f..00000000 --- a/ceilometer/compute/virt/inspector.py +++ /dev/null @@ -1,315 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector abstraction for read-only access to hypervisors.""" - -import collections - -from oslo_config import cfg -from oslo_log import log -from stevedore import driver - -import ceilometer -from ceilometer.i18n import _ - - -OPTS = [ - cfg.StrOpt('hypervisor_inspector', - default='libvirt', - help='Inspector to use for inspecting the hypervisor layer. ' - 'Known inspectors are libvirt, hyperv, vmware, xenapi ' - 'and powervm.'), -] - -cfg.CONF.register_opts(OPTS) - - -LOG = log.getLogger(__name__) - -# Named tuple representing instances. -# -# name: the name of the instance -# uuid: the UUID associated with the instance -# -Instance = collections.namedtuple('Instance', ['name', 'UUID']) - - -# Named tuple representing CPU statistics. -# -# number: number of CPUs -# time: cumulative CPU time -# -CPUStats = collections.namedtuple('CPUStats', ['number', 'time']) - -# Named tuple representing CPU Utilization statistics. -# -# util: CPU utilization in percentage -# -CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util']) - -# Named tuple representing Memory usage statistics. -# -# usage: Amount of memory used -# -MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage']) - - -# Named tuple representing Resident Memory usage statistics. -# -# resident: Amount of resident memory -# -MemoryResidentStats = collections.namedtuple('MemoryResidentStats', - ['resident']) - - -# Named tuple representing vNICs. -# -# name: the name of the vNIC -# mac: the MAC address -# fref: the filter ref -# parameters: miscellaneous parameters -# -Interface = collections.namedtuple('Interface', ['name', 'mac', - 'fref', 'parameters']) - - -# Named tuple representing vNIC statistics. -# -# rx_bytes: number of received bytes -# rx_packets: number of received packets -# tx_bytes: number of transmitted bytes -# tx_packets: number of transmitted packets -# -InterfaceStats = collections.namedtuple('InterfaceStats', - ['rx_bytes', 'rx_packets', - 'tx_bytes', 'tx_packets']) - - -# Named tuple representing vNIC rate statistics. 
-# -# rx_bytes_rate: rate of received bytes -# tx_bytes_rate: rate of transmitted bytes -# -InterfaceRateStats = collections.namedtuple('InterfaceRateStats', - ['rx_bytes_rate', 'tx_bytes_rate']) - - -# Named tuple representing disks. -# -# device: the device name for the disk -# -Disk = collections.namedtuple('Disk', ['device']) - - -# Named tuple representing disk statistics. -# -# read_bytes: number of bytes read -# read_requests: number of read operations -# write_bytes: number of bytes written -# write_requests: number of write operations -# errors: number of errors -# -DiskStats = collections.namedtuple('DiskStats', - ['read_bytes', 'read_requests', - 'write_bytes', 'write_requests', - 'errors']) - -# Named tuple representing disk rate statistics. -# -# read_bytes_rate: number of bytes read per second -# read_requests_rate: number of read operations per second -# write_bytes_rate: number of bytes written per second -# write_requests_rate: number of write operations per second -# -DiskRateStats = collections.namedtuple('DiskRateStats', - ['read_bytes_rate', - 'read_requests_rate', - 'write_bytes_rate', - 'write_requests_rate']) - -# Named tuple representing disk latency statistics. -# -# disk_latency: average disk latency -# -DiskLatencyStats = collections.namedtuple('DiskLatencyStats', - ['disk_latency']) - -# Named tuple representing disk iops statistics. -# -# iops: number of iops per second -# -DiskIOPSStats = collections.namedtuple('DiskIOPSStats', - ['iops_count']) - - -# Named tuple representing disk Information. -# -# capacity: capacity of the disk -# allocation: allocation of the disk -# physical: usage of the disk - -DiskInfo = collections.namedtuple('DiskInfo', - ['capacity', - 'allocation', - 'physical']) - - -# Exception types -# -class InspectorException(Exception): - def __init__(self, message=None): - super(InspectorException, self).__init__(message) - - -class InstanceNotFoundException(InspectorException): - pass - - -class InstanceShutOffException(InspectorException): - pass - - -class NoDataException(InspectorException): - pass - - -class NoSanityException(InspectorException): - pass - - -# Main virt inspector abstraction layering over the hypervisor API. -# -class Inspector(object): - - def check_sanity(self): - """Check the sanity of hypervisor inspector. - - Each subclass could overwrite it to throw any exception - when detecting mis-configured inspector - """ - pass - - def inspect_cpus(self, instance): - """Inspect the CPU statistics for an instance. - - :param instance: the target instance - :return: the number of CPUs and cumulative CPU time - """ - raise ceilometer.NotImplementedError - - def inspect_cpu_util(self, instance, duration=None): - """Inspect the CPU Utilization (%) for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the percentage of CPU utilization - """ - raise ceilometer.NotImplementedError - - def inspect_vnics(self, instance): - """Inspect the vNIC statistics for an instance. - - :param instance: the target instance - :return: for each vNIC, the number of bytes & packets - received and transmitted - """ - raise ceilometer.NotImplementedError - - def inspect_vnic_rates(self, instance, duration=None): - """Inspect the vNIC rate statistics for an instance. 
- - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: for each vNIC, the rate of bytes & packets - received and transmitted - """ - raise ceilometer.NotImplementedError - - def inspect_disks(self, instance): - """Inspect the disk statistics for an instance. - - :param instance: the target instance - :return: for each disk, the number of bytes & operations - read and written, and the error count - """ - raise ceilometer.NotImplementedError - - def inspect_memory_usage(self, instance, duration=None): - """Inspect the memory usage statistics for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the amount of memory used - """ - raise ceilometer.NotImplementedError - - def inspect_memory_resident(self, instance, duration=None): - """Inspect the resident memory statistics for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: the amount of resident memory - """ - raise ceilometer.NotImplementedError - - def inspect_disk_rates(self, instance, duration=None): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :param duration: the last 'n' seconds, over which the value should be - inspected - :return: for each disk, the number of bytes & operations - read and written per second, with the error count - """ - raise ceilometer.NotImplementedError - - def inspect_disk_latency(self, instance): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :return: for each disk, the average disk latency - """ - raise ceilometer.NotImplementedError - - def inspect_disk_iops(self, instance): - """Inspect the disk statistics as rates for an instance. - - :param instance: the target instance - :return: for each disk, the number of iops per second - """ - raise ceilometer.NotImplementedError - - def inspect_disk_info(self, instance): - """Inspect the disk information for an instance. - - :param instance: the target instance - :return: for each disk , capacity , alloaction and usage - """ - raise ceilometer.NotImplementedError - - -def get_hypervisor_inspector(): - try: - namespace = 'ceilometer.compute.virt' - mgr = driver.DriverManager(namespace, - cfg.CONF.hypervisor_inspector, - invoke_on_load=True) - return mgr.driver - except ImportError as e: - LOG.error(_("Unable to load the hypervisor inspector: %s") % e) - return Inspector() diff --git a/ceilometer/compute/virt/libvirt/__init__.py b/ceilometer/compute/virt/libvirt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/libvirt/inspector.py b/ceilometer/compute/virt/libvirt/inspector.py deleted file mode 100644 index 8220d997..00000000 --- a/ceilometer/compute/virt/libvirt/inspector.py +++ /dev/null @@ -1,228 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
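The get_hypervisor_inspector function above resolves the configured inspector through a stevedore entry point rather than importing it directly, so switching hypervisors is a configuration change, not a code change. A reduced sketch of that lookup, assuming an entry point is registered under the ceilometer.compute.virt namespace (as the package's setup.cfg does)::

    from stevedore import driver

    def load_inspector(name='libvirt'):
        # DriverManager finds the named entry point among installed
        # packages and, with invoke_on_load, instantiates the class.
        mgr = driver.DriverManager(namespace='ceilometer.compute.virt',
                                   name=name,
                                   invoke_on_load=True)
        return mgr.driver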
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Implementation of Inspector abstraction for libvirt.""" - -from lxml import etree -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import units -import six - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ - -libvirt = None - -LOG = logging.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('libvirt_type', - default='kvm', - choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'], - help='Libvirt domain type.'), - cfg.StrOpt('libvirt_uri', - default='', - help='Override the default libvirt URI ' - '(which is dependent on libvirt_type).'), -] - -CONF = cfg.CONF -CONF.register_opts(OPTS) - - -def retry_on_disconnect(function): - def decorator(self, *args, **kwargs): - try: - return function(self, *args, **kwargs) - except ImportError: - # NOTE(sileht): in case of libvirt failed to be imported - raise - except libvirt.libvirtError as e: - if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): - LOG.debug('Connection to libvirt broken') - self.connection = None - return function(self, *args, **kwargs) - else: - raise - return decorator - - -class LibvirtInspector(virt_inspector.Inspector): - - per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///') - - def __init__(self): - self.uri = self._get_uri() - self.connection = None - - def _get_uri(self): - return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type, - 'qemu:///system') - - def _get_connection(self): - if not self.connection: - global libvirt - if libvirt is None: - libvirt = __import__('libvirt') - LOG.debug('Connecting to libvirt: %s', self.uri) - self.connection = libvirt.openReadOnly(self.uri) - - return self.connection - - def check_sanity(self): - if not self._get_connection(): - raise virt_inspector.NoSanityException() - - @retry_on_disconnect - def _lookup_by_uuid(self, instance): - instance_name = util.instance_name(instance) - try: - return self._get_connection().lookupByUUIDString(instance.id) - except Exception as ex: - if not libvirt or not isinstance(ex, libvirt.libvirtError): - raise virt_inspector.InspectorException(six.text_type(ex)) - error_code = ex.get_error_code() - if (error_code in (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR) and - ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC)): - raise - msg = _("Error from libvirt while looking up instance " - "<name=%(name)s, id=%(id)s>: " - "[Error Code %(error_code)s] " - "%(ex)s") % {'name': instance_name, - 'id': instance.id, - 'error_code': error_code, - 'ex': ex} - raise virt_inspector.InstanceNotFoundException(msg) - - def inspect_cpus(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - dom_info = domain.info() - return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4]) - - def _get_domain_not_shut_off_or_raise(self, instance): - instance_name = util.instance_name(instance) - domain = self._lookup_by_uuid(instance) - - state = domain.info()[0] - if state == libvirt.VIR_DOMAIN_SHUTOFF: - msg = _('Failed to inspect data of instance ' - '<name=%(name)s, id=%(id)s>, ' - 'domain state is SHUTOFF.') % { - 'name': instance_name, 'id': instance.id} - raise virt_inspector.InstanceShutOffException(msg) - - return domain - - def inspect_vnics(self, instance): - domain = 
self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for iface in tree.findall('devices/interface'): - target = iface.find('target') - if target is not None: - name = target.get('dev') - else: - continue - mac = iface.find('mac') - if mac is not None: - mac_address = mac.get('address') - else: - continue - fref = iface.find('filterref') - if fref is not None: - fref = fref.get('filter') - - params = dict((p.get('name').lower(), p.get('value')) - for p in iface.findall('filterref/parameter')) - interface = virt_inspector.Interface(name=name, mac=mac_address, - fref=fref, parameters=params) - dom_stats = domain.interfaceStats(name) - stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0], - rx_packets=dom_stats[1], - tx_bytes=dom_stats[4], - tx_packets=dom_stats[5]) - yield (interface, stats) - - def inspect_disks(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for device in filter( - bool, - [target.get("dev") - for target in tree.findall('devices/disk/target')]): - disk = virt_inspector.Disk(device=device) - block_stats = domain.blockStats(device) - stats = virt_inspector.DiskStats(read_requests=block_stats[0], - read_bytes=block_stats[1], - write_requests=block_stats[2], - write_bytes=block_stats[3], - errors=block_stats[4]) - yield (disk, stats) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - domain = self._get_domain_not_shut_off_or_raise(instance) - - try: - memory_stats = domain.memoryStats() - if (memory_stats and - memory_stats.get('available') and - memory_stats.get('unused')): - memory_used = (memory_stats.get('available') - - memory_stats.get('unused')) - # Stat provided from libvirt is in KB, converting it to MB. - memory_used = memory_used / units.Ki - return virt_inspector.MemoryUsageStats(usage=memory_used) - else: - msg = _('Failed to inspect memory usage of instance ' - '<name=%(name)s, id=%(id)s>, ' - 'can not get info from libvirt.') % { - 'name': instance_name, 'id': instance.id} - raise virt_inspector.NoDataException(msg) - # memoryStats might launch an exception if the method is not supported - # by the underlying hypervisor being used by libvirt. 
- except libvirt.libvirtError as e: - msg = _('Failed to inspect memory usage of %(instance_uuid)s, ' - 'can not get info from libvirt: %(error)s') % { - 'instance_uuid': instance.id, 'error': e} - raise virt_inspector.NoDataException(msg) - - def inspect_disk_info(self, instance): - domain = self._get_domain_not_shut_off_or_raise(instance) - - tree = etree.fromstring(domain.XMLDesc(0)) - for device in filter( - bool, - [target.get("dev") - for target in tree.findall('devices/disk/target')]): - disk = virt_inspector.Disk(device=device) - block_info = domain.blockInfo(device) - info = virt_inspector.DiskInfo(capacity=block_info[0], - allocation=block_info[1], - physical=block_info[2]) - - yield (disk, info) - - def inspect_memory_resident(self, instance, duration=None): - domain = self._get_domain_not_shut_off_or_raise(instance) - memory = domain.memoryStats()['rss'] / units.Ki - return virt_inspector.MemoryResidentStats(resident=memory) diff --git a/ceilometer/compute/virt/vmware/__init__.py b/ceilometer/compute/virt/vmware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/vmware/inspector.py b/ceilometer/compute/virt/vmware/inspector.py deleted file mode 100644 index 0009defa..00000000 --- a/ceilometer/compute/virt/vmware/inspector.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of Inspector abstraction for VMware vSphere""" - -from oslo_config import cfg -from oslo_utils import units -from oslo_vmware import api -import six - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.vmware import vsphere_operations -from ceilometer.i18n import _ - - -opt_group = cfg.OptGroup(name='vmware', - title='Options for VMware') - -OPTS = [ - cfg.StrOpt('host_ip', - default='', - help='IP address of the VMware vSphere host.'), - cfg.PortOpt('host_port', - default=443, - help='Port of the VMware vSphere host.'), - cfg.StrOpt('host_username', - default='', - help='Username of VMware vSphere.'), - cfg.StrOpt('host_password', - default='', - help='Password of VMware vSphere.', - secret=True), - cfg.StrOpt('ca_file', - help='CA bundle file to use in verifying the vCenter server ' - 'certificate.'), - cfg.BoolOpt('insecure', - default=False, - help='If true, the vCenter server certificate is not ' - 'verified. If false, then the default CA truststore is ' - 'used for verification. This option is ignored if ' - '"ca_file" is set.'), - cfg.IntOpt('api_retry_count', - default=10, - help='Number of times a VMware vSphere API may be retried.'), - cfg.FloatOpt('task_poll_interval', - default=0.5, - help='Sleep time in seconds for polling an ongoing async ' - 'task.'), - cfg.StrOpt('wsdl_location', - help='Optional vim service WSDL location ' - 'e.g http://<server>/vimService.wsdl. 
' - 'Optional over-ride to default location for bug ' - 'work-arounds.'), -] - -cfg.CONF.register_group(opt_group) -cfg.CONF.register_opts(OPTS, group=opt_group) - -VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average' -VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average' -VC_NETWORK_RX_COUNTER = 'net:received:average' -VC_NETWORK_TX_COUNTER = 'net:transmitted:average' -VC_DISK_READ_RATE_CNTR = "disk:read:average" -VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average" -VC_DISK_WRITE_RATE_CNTR = "disk:write:average" -VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average" - - -def get_api_session(): - api_session = api.VMwareAPISession( - cfg.CONF.vmware.host_ip, - cfg.CONF.vmware.host_username, - cfg.CONF.vmware.host_password, - cfg.CONF.vmware.api_retry_count, - cfg.CONF.vmware.task_poll_interval, - wsdl_loc=cfg.CONF.vmware.wsdl_location, - port=cfg.CONF.vmware.host_port, - cacert=cfg.CONF.vmware.ca_file, - insecure=cfg.CONF.vmware.insecure) - return api_session - - -class VsphereInspector(virt_inspector.Inspector): - - def __init__(self): - super(VsphereInspector, self).__init__() - self._ops = vsphere_operations.VsphereOperations( - get_api_session(), 1000) - - def inspect_cpu_util(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if vm_moid is None: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - cpu_util_counter_id = self._ops.get_perf_counter_id( - VC_AVERAGE_CPU_CONSUMED_CNTR) - cpu_util = self._ops.query_vm_aggregate_stats( - vm_moid, cpu_util_counter_id, duration) - - # For this counter vSphere returns values scaled-up by 100, since the - # corresponding API can't return decimals, but only longs. - # For e.g. if the utilization is 12.34%, the value returned is 1234. - # Hence, dividing by 100. - cpu_util = cpu_util / 100 - return virt_inspector.CPUUtilStats(util=cpu_util) - - def inspect_vnic_rates(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if not vm_moid: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - - vnic_stats = {} - vnic_ids = set() - - for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER): - net_counter_id = self._ops.get_perf_counter_id(net_counter) - vnic_id_to_stats_map = self._ops.query_vm_device_stats( - vm_moid, net_counter_id, duration) - vnic_stats[net_counter] = vnic_id_to_stats_map - vnic_ids.update(six.iterkeys(vnic_id_to_stats_map)) - - # Stats provided from vSphere are in KB/s, converting it to B/s. - for vnic_id in vnic_ids: - rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER] - .get(vnic_id, 0) * units.Ki) - tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER] - .get(vnic_id, 0) * units.Ki) - - stats = virt_inspector.InterfaceRateStats(rx_bytes_rate, - tx_bytes_rate) - interface = virt_inspector.Interface( - name=vnic_id, - mac=None, - fref=None, - parameters=None) - yield (interface, stats) - - def inspect_memory_usage(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if vm_moid is None: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - mem_counter_id = self._ops.get_perf_counter_id( - VC_AVERAGE_MEMORY_CONSUMED_CNTR) - memory = self._ops.query_vm_aggregate_stats( - vm_moid, mem_counter_id, duration) - # Stat provided from vSphere is in KB, converting it to MB. 
- memory = memory / units.Ki - return virt_inspector.MemoryUsageStats(usage=memory) - - def inspect_disk_rates(self, instance, duration=None): - vm_moid = self._ops.get_vm_moid(instance.id) - if not vm_moid: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in VMware vSphere') % instance.id) - - disk_stats = {} - disk_ids = set() - disk_counters = [ - VC_DISK_READ_RATE_CNTR, - VC_DISK_READ_REQUESTS_RATE_CNTR, - VC_DISK_WRITE_RATE_CNTR, - VC_DISK_WRITE_REQUESTS_RATE_CNTR - ] - - for disk_counter in disk_counters: - disk_counter_id = self._ops.get_perf_counter_id(disk_counter) - disk_id_to_stat_map = self._ops.query_vm_device_stats( - vm_moid, disk_counter_id, duration) - disk_stats[disk_counter] = disk_id_to_stat_map - disk_ids.update(six.iterkeys(disk_id_to_stat_map)) - - for disk_id in disk_ids: - - def stat_val(counter_name): - return disk_stats[counter_name].get(disk_id, 0) - - disk = virt_inspector.Disk(device=disk_id) - # Stats provided from vSphere are in KB/s, converting it to B/s. - disk_rate_info = virt_inspector.DiskRateStats( - read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki, - read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR), - write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki, - write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR) - ) - yield(disk, disk_rate_info) diff --git a/ceilometer/compute/virt/vmware/vsphere_operations.py b/ceilometer/compute/virt/vmware/vsphere_operations.py deleted file mode 100644 index 16b92b8b..00000000 --- a/ceilometer/compute/virt/vmware/vsphere_operations.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_vmware import vim_util - - -PERF_MANAGER_TYPE = "PerformanceManager" -PERF_COUNTER_PROPERTY = "perfCounter" -VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value' - -# ESXi Servers sample performance data every 20 seconds. 20-second interval -# data is called instance data or real-time data. To retrieve instance data, -# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId" -# property. In that case the "QueryPerf" method operates as a raw data feed -# that bypasses the vCenter database and instead retrieves performance data -# from an ESXi host. -# The following value is time interval for real-time performance stats -# in seconds and it is not configurable. -VC_REAL_TIME_SAMPLING_INTERVAL = 20 - - -class VsphereOperations(object): - """Class to invoke vSphere APIs calls. - - vSphere APIs calls are required by various pollsters, collecting data from - VMware infrastructure. - """ - def __init__(self, api_session, max_objects): - self._api_session = api_session - self._max_objects = max_objects - # Mapping between "VM's Nova instance Id" -> "VM's MOID" - # In case a VM is deployed by Nova, then its name is instance ID. - # So this map essentially has VM names as keys. 
- self._vm_moid_lookup_map = {} - - # Mapping from full name -> ID, for VC Performance counters - self._perf_counter_id_lookup_map = None - - def _init_vm_moid_lookup_map(self): - session = self._api_session - result = session.invoke_api(vim_util, "get_objects", session.vim, - "VirtualMachine", self._max_objects, - [VM_INSTANCE_ID_PROPERTY], - False) - while result: - for vm_object in result.objects: - vm_moid = vm_object.obj.value - # propSet will be set only if the server provides value - if hasattr(vm_object, 'propSet') and vm_object.propSet: - vm_instance_id = vm_object.propSet[0].val - if vm_instance_id: - self._vm_moid_lookup_map[vm_instance_id] = vm_moid - - result = session.invoke_api(vim_util, "continue_retrieval", - session.vim, result) - - def get_vm_moid(self, vm_instance_id): - """Method returns VC MOID of the VM by its NOVA instance ID.""" - if vm_instance_id not in self._vm_moid_lookup_map: - self._init_vm_moid_lookup_map() - - return self._vm_moid_lookup_map.get(vm_instance_id, None) - - def _init_perf_counter_id_lookup_map(self): - - # Query details of all the performance counters from VC - session = self._api_session - client_factory = session.vim.client.factory - perf_manager = session.vim.service_content.perfManager - - prop_spec = vim_util.build_property_spec( - client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY]) - - obj_spec = vim_util.build_object_spec( - client_factory, perf_manager, None) - - filter_spec = vim_util.build_property_filter_spec( - client_factory, [prop_spec], [obj_spec]) - - options = client_factory.create('ns0:RetrieveOptions') - options.maxObjects = 1 - - prop_collector = session.vim.service_content.propertyCollector - result = session.invoke_api(session.vim, "RetrievePropertiesEx", - prop_collector, specSet=[filter_spec], - options=options) - - perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo - - # Extract the counter Id for each counter and populate the map - self._perf_counter_id_lookup_map = {} - for perf_counter_info in perf_counter_infos: - - counter_group = perf_counter_info.groupInfo.key - counter_name = perf_counter_info.nameInfo.key - counter_rollup_type = perf_counter_info.rollupType - counter_id = perf_counter_info.key - - counter_full_name = (counter_group + ":" + counter_name + ":" + - counter_rollup_type) - self._perf_counter_id_lookup_map[counter_full_name] = counter_id - - def get_perf_counter_id(self, counter_full_name): - """Method returns the ID of VC performance counter by its full name. - - A VC performance counter is uniquely identified by the - tuple {'Group Name', 'Counter Name', 'Rollup Type'}. - It will have an id - counter ID (changes from one VC to another), - which is required to query performance stats from that VC. - This method returns the ID for a counter, - assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'. - """ - if not self._perf_counter_id_lookup_map: - self._init_perf_counter_id_lookup_map() - return self._perf_counter_id_lookup_map[counter_full_name] - - # TODO(akhils@vmware.com) Move this method to common library - # when it gets checked-in - def query_vm_property(self, vm_moid, property_name): - """Method returns the value of specified property for a VM. 
- - :param vm_moid: moid of the VM whose property is to be queried - :param property_name: path of the property - """ - vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine") - session = self._api_session - return session.invoke_api(vim_util, "get_object_property", - session.vim, vm_mobj, property_name) - - def query_vm_aggregate_stats(self, vm_moid, counter_id, duration): - """Method queries the aggregated real-time stat value for a VM. - - This method should be used for aggregate counters. - - :param vm_moid: moid of the VM - :param counter_id: id of the perf counter in VC - :param duration: in seconds from current time, - over which the stat value was applicable - :return: the aggregated stats value for the counter - """ - # For aggregate counters, device_name should be "" - stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration) - - # Performance manager provides the aggregated stats value - # with device name -> None - return stats.get(None, 0) - - def query_vm_device_stats(self, vm_moid, counter_id, duration): - """Method queries the real-time stat values for a VM, for all devices. - - This method should be used for device(non-aggregate) counters. - - :param vm_moid: moid of the VM - :param counter_id: id of the perf counter in VC - :param duration: in seconds from current time, - over which the stat value was applicable - :return: a map containing the stat values keyed by the device ID/name - """ - # For device counters, device_name should be "*" to get stat values - # for all devices. - stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration) - - # For some device counters, in addition to the per device value - # the Performance manager also returns the aggregated value. - # Just to be consistent, deleting the aggregated value if present. - stats.pop(None, None) - return stats - - def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration): - """Method queries the real-time stat values for a VM. - - :param vm_moid: moid of the VM for which stats are needed - :param counter_id: id of the perf counter in VC - :param device_name: name of the device for which stats are to be - queried. For aggregate counters pass empty string (""). - For device counters pass "*", if stats are required over all - devices. 
- :param duration: in seconds from current time, - over which the stat value was applicable - :return: a map containing the stat values keyed by the device ID/name - """ - - session = self._api_session - client_factory = session.vim.client.factory - - # Construct the QuerySpec - metric_id = client_factory.create('ns0:PerfMetricId') - metric_id.counterId = counter_id - metric_id.instance = device_name - - query_spec = client_factory.create('ns0:PerfQuerySpec') - query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine") - query_spec.metricId = [metric_id] - query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL - # We query all samples which are applicable over the specified duration - samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL) - if duration and - duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1) - query_spec.maxSample = samples_cnt - - perf_manager = session.vim.service_content.perfManager - perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager, - querySpec=[query_spec]) - - stat_values = {} - if perf_stats: - entity_metric = perf_stats[0] - sample_infos = entity_metric.sampleInfo - - if len(sample_infos) > 0: - for metric_series in entity_metric.value: - # Take the average of all samples to improve the accuracy - # of the stat value - stat_value = float(sum(metric_series.value)) / samples_cnt - device_id = metric_series.id.instance - stat_values[device_id] = stat_value - - return stat_values diff --git a/ceilometer/compute/virt/xenapi/__init__.py b/ceilometer/compute/virt/xenapi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/compute/virt/xenapi/inspector.py b/ceilometer/compute/virt/xenapi/inspector.py deleted file mode 100644 index 19405dd0..00000000 --- a/ceilometer/compute/virt/xenapi/inspector.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Implementation of Inspector abstraction for XenAPI.""" - -from oslo_config import cfg -from oslo_utils import units -import six.moves.urllib.parse as urlparse -try: - import XenAPI as api -except ImportError: - api = None - -from ceilometer.compute.pollsters import util -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.i18n import _ - -opt_group = cfg.OptGroup(name='xenapi', - title='Options for XenAPI') - -OPTS = [ - cfg.StrOpt('connection_url', - help='URL for connection to XenServer/Xen Cloud Platform.'), - cfg.StrOpt('connection_username', - default='root', - help='Username for connection to XenServer/Xen Cloud ' - 'Platform.'), - cfg.StrOpt('connection_password', - help='Password for connection to XenServer/Xen Cloud Platform.', - secret=True), -] - -CONF = cfg.CONF -CONF.register_group(opt_group) -CONF.register_opts(OPTS, group=opt_group) - - -class XenapiException(virt_inspector.InspectorException): - pass - - -def swap_xapi_host(url, host_addr): - """Replace the XenServer address present in 'url' with 'host_addr'.""" - temp_url = urlparse.urlparse(url) - # The connection URL is served by XAPI and doesn't support having a - # path for the connection url after the port. And username/password - # will be pass separately. So the URL like "http://abc:abc@abc:433/abc" - # should not appear for XAPI case. - temp_netloc = temp_url.netloc.replace(temp_url.hostname, '%s' % host_addr) - replaced = temp_url._replace(netloc=temp_netloc) - return urlparse.urlunparse(replaced) - - -def get_api_session(): - if not api: - raise ImportError(_('XenAPI not installed')) - - url = CONF.xenapi.connection_url - username = CONF.xenapi.connection_username - password = CONF.xenapi.connection_password - if not url or password is None: - raise XenapiException(_('Must specify connection_url, and ' - 'connection_password to use')) - - try: - session = (api.xapi_local() if url == 'unix://local' - else api.Session(url)) - session.login_with_password(username, password) - except api.Failure as e: - if e.details[0] == 'HOST_IS_SLAVE': - master = e.details[1] - url = swap_xapi_host(url, master) - try: - session = api.Session(url) - session.login_with_password(username, password) - except api.Failure as es: - raise XenapiException(_('Could not connect slave host: %s ') % - es.details[0]) - else: - msg = _("Could not connect to XenAPI: %s") % e.details[0] - raise XenapiException(msg) - return session - - -class XenapiInspector(virt_inspector.Inspector): - - def __init__(self): - super(XenapiInspector, self).__init__() - self.session = get_api_session() - - def _get_host_ref(self): - """Return the xenapi host on which nova-compute runs on.""" - return self.session.xenapi.session.get_this_host(self.session.handle) - - def _call_xenapi(self, method, *args): - return self.session.xenapi_request(method, args) - - def _lookup_by_name(self, instance_name): - vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name) - n = len(vm_refs) - if n == 0: - raise virt_inspector.InstanceNotFoundException( - _('VM %s not found in XenServer') % instance_name) - elif n > 1: - raise XenapiException( - _('Multiple VM %s found in XenServer') % instance_name) - else: - return vm_refs[0] - - def inspect_cpu_util(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) - metrics_rec = self._call_xenapi("VM_metrics.get_record", - metrics_ref) - vcpus_number = 
metrics_rec['VCPUs_number'] - vcpus_utils = metrics_rec['VCPUs_utilisation'] - if len(vcpus_utils) == 0: - msg = _("Could not get VM %s CPU Utilization") % instance_name - raise XenapiException(msg) - - utils = 0.0 - for num in range(int(vcpus_number)): - utils += vcpus_utils.get(str(num)) - utils = utils / int(vcpus_number) * 100 - return virt_inspector.CPUUtilStats(util=utils) - - def inspect_memory_usage(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref) - metrics_rec = self._call_xenapi("VM_metrics.get_record", - metrics_ref) - # Stat provided from XenServer is in B, converting it to MB. - memory = int(metrics_rec['memory_actual']) / units.Mi - return virt_inspector.MemoryUsageStats(usage=memory) - - def inspect_vnic_rates(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref) - if vif_refs: - for vif_ref in vif_refs: - vif_rec = self._call_xenapi("VIF.get_record", vif_ref) - vif_metrics_ref = self._call_xenapi( - "VIF.get_metrics", vif_ref) - vif_metrics_rec = self._call_xenapi( - "VIF_metrics.get_record", vif_metrics_ref) - - interface = virt_inspector.Interface( - name=vif_rec['uuid'], - mac=vif_rec['MAC'], - fref=None, - parameters=None) - rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki - tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki - stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate) - yield (interface, stats) - - def inspect_disk_rates(self, instance, duration=None): - instance_name = util.instance_name(instance) - vm_ref = self._lookup_by_name(instance_name) - vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref) - if vbd_refs: - for vbd_ref in vbd_refs: - vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref) - vbd_metrics_ref = self._call_xenapi("VBD.get_metrics", - vbd_ref) - vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record", - vbd_metrics_ref) - - disk = virt_inspector.Disk(device=vbd_rec['device']) - # Stats provided from XenServer are in KB/s, - # converting it to B/s. - read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki - write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki - disk_rate_info = virt_inspector.DiskRateStats( - read_bytes_rate=read_rate, - read_requests_rate=0, - write_bytes_rate=write_rate, - write_requests_rate=0) - yield(disk, disk_rate_info) diff --git a/ceilometer/coordination.py b/ceilometer/coordination.py deleted file mode 100644 index 2bb584bf..00000000 --- a/ceilometer/coordination.py +++ /dev/null @@ -1,229 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from oslo_config import cfg -from oslo_log import log -import retrying -import tooz.coordination - -from ceilometer.i18n import _LE, _LI, _LW -from ceilometer import utils - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('backend_url', - help='The backend URL to use for distributed coordination. If ' - 'left empty, per-deployment central agent and per-host ' - 'compute agent won\'t do workload ' - 'partitioning and will only function correctly if a ' - 'single instance of that service is running.'), - cfg.FloatOpt('heartbeat', - default=1.0, - help='Number of seconds between heartbeats for distributed ' - 'coordination.'), - cfg.FloatOpt('check_watchers', - default=10.0, - help='Number of seconds between checks to see if group ' - 'membership has changed'), - cfg.IntOpt('retry_backoff', - default=1, - help='Retry backoff factor when retrying to connect with' - 'coordination backend'), - cfg.IntOpt('max_retry_interval', - default=30, - help='Maximum number of seconds between retry to join ' - 'partitioning group') -] -cfg.CONF.register_opts(OPTS, group='coordination') - - -class ErrorJoiningPartitioningGroup(Exception): - def __init__(self): - super(ErrorJoiningPartitioningGroup, self).__init__(_LE( - 'Coordination join_group Error joining partitioning group')) - - -class MemberNotInGroupError(Exception): - def __init__(self, group_id, members, my_id): - super(MemberNotInGroupError, self).__init__(_LE( - 'Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: ' - 'Current agent is not part of group and cannot take tasks') % - {'group_id': group_id, 'members': members, 'me': my_id}) - - -def retry_on_error_joining_partition(exception): - return isinstance(exception, ErrorJoiningPartitioningGroup) - - -def retry_on_member_not_in_group(exception): - return isinstance(exception, MemberNotInGroupError) - - -class PartitionCoordinator(object): - """Workload partitioning coordinator. - - This class uses the `tooz` library to manage group membership. - - To ensure that the other agents know this agent is still alive, - the `heartbeat` method should be called periodically. - - Coordination errors and reconnects are handled under the hood, so the - service using the partition coordinator need not care whether the - coordination backend is down. The `extract_my_subset` will simply return an - empty iterable in this case. 
- """ - - def __init__(self, my_id=None): - self._coordinator = None - self._groups = set() - self._my_id = my_id or str(uuid.uuid4()) - - def start(self): - backend_url = cfg.CONF.coordination.backend_url - if backend_url: - try: - self._coordinator = tooz.coordination.get_coordinator( - backend_url, self._my_id) - self._coordinator.start() - LOG.info(_LI('Coordination backend started successfully.')) - except tooz.coordination.ToozError: - LOG.exception(_LE('Error connecting to coordination backend.')) - - def stop(self): - if not self._coordinator: - return - - for group in list(self._groups): - self.leave_group(group) - - try: - self._coordinator.stop() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error connecting to coordination backend.')) - finally: - self._coordinator = None - - def is_active(self): - return self._coordinator is not None - - def heartbeat(self): - if self._coordinator: - if not self._coordinator.is_started: - # re-connect - self.start() - try: - self._coordinator.heartbeat() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error sending a heartbeat to coordination ' - 'backend.')) - - def watch_group(self, namespace, callback): - if self._coordinator: - self._coordinator.watch_join_group(namespace, callback) - self._coordinator.watch_leave_group(namespace, callback) - - def run_watchers(self): - if self._coordinator: - self._coordinator.run_watchers() - - def join_group(self, group_id): - if (not self._coordinator or not self._coordinator.is_started - or not group_id): - return - - retry_backoff = cfg.CONF.coordination.retry_backoff * 1000 - max_retry_interval = cfg.CONF.coordination.max_retry_interval * 1000 - - @retrying.retry( - wait_exponential_multiplier=retry_backoff, - wait_exponential_max=max_retry_interval, - retry_on_exception=retry_on_error_joining_partition, - wrap_exception=True) - def _inner(): - try: - join_req = self._coordinator.join_group(group_id) - join_req.get() - LOG.info(_LI('Joined partitioning group %s'), group_id) - except tooz.coordination.MemberAlreadyExist: - return - except tooz.coordination.GroupNotCreated: - create_grp_req = self._coordinator.create_group(group_id) - try: - create_grp_req.get() - except tooz.coordination.GroupAlreadyExist: - pass - raise ErrorJoiningPartitioningGroup() - except tooz.coordination.ToozError: - LOG.exception(_LE('Error joining partitioning group %s,' - ' re-trying'), group_id) - raise ErrorJoiningPartitioningGroup() - self._groups.add(group_id) - - return _inner() - - def leave_group(self, group_id): - if group_id not in self._groups: - return - if self._coordinator: - self._coordinator.leave_group(group_id) - self._groups.remove(group_id) - LOG.info(_LI('Left partitioning group %s'), group_id) - - def _get_members(self, group_id): - if not self._coordinator: - return [self._my_id] - - while True: - get_members_req = self._coordinator.get_members(group_id) - try: - return get_members_req.get() - except tooz.coordination.GroupNotCreated: - self.join_group(group_id) - - @retrying.retry(stop_max_attempt_number=5, wait_random_max=2000, - retry_on_exception=retry_on_member_not_in_group) - def extract_my_subset(self, group_id, iterable, attempt=0): - """Filters an iterable, returning only objects assigned to this agent. - - We have a list of objects and get a list of active group members from - `tooz`. We then hash all the objects into buckets and return only - the ones that hashed into *our* bucket. 
- """ - if not group_id: - return iterable - if group_id not in self._groups: - self.join_group(group_id) - try: - members = self._get_members(group_id) - LOG.debug('Members of group: %s, Me: %s', members, self._my_id) - if self._my_id not in members: - LOG.warning(_LW('Cannot extract tasks because agent failed to ' - 'join group properly. Rejoining group.')) - self.join_group(group_id) - members = self._get_members(group_id) - if self._my_id not in members: - raise MemberNotInGroupError(group_id, members, self._my_id) - hr = utils.HashRing(members) - filtered = [v for v in iterable - if hr.get_node(str(v)) == self._my_id] - LOG.debug('My subset: %s', [str(f) for f in filtered]) - return filtered - except tooz.coordination.ToozError: - LOG.exception(_LE('Error getting group membership info from ' - 'coordination backend.')) - return [] diff --git a/ceilometer/declarative.py b/ceilometer/declarative.py deleted file mode 100644 index 47ebbe73..00000000 --- a/ceilometer/declarative.py +++ /dev/null @@ -1,188 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from jsonpath_rw_ext import parser -from oslo_config import cfg -from oslo_log import log -import six -import yaml - -from ceilometer.i18n import _, _LI - -LOG = log.getLogger(__name__) - - -class DefinitionException(Exception): - def __init__(self, message, definition_cfg): - msg = '%s %s: %s' % (self.__class__.__name__, definition_cfg, message) - super(DefinitionException, self).__init__(msg) - self.brief_message = message - - -class MeterDefinitionException(DefinitionException): - pass - - -class EventDefinitionException(DefinitionException): - pass - - -class ResourceDefinitionException(DefinitionException): - pass - - -class Definition(object): - JSONPATH_RW_PARSER = parser.ExtentedJsonPathParser() - GETTERS_CACHE = {} - - def __init__(self, name, cfg, plugin_manager): - self.cfg = cfg - self.name = name - self.plugin = None - if isinstance(cfg, dict): - if 'fields' not in cfg: - raise DefinitionException( - _("The field 'fields' is required for %s") % name, - self.cfg) - - if 'plugin' in cfg: - plugin_cfg = cfg['plugin'] - if isinstance(plugin_cfg, six.string_types): - plugin_name = plugin_cfg - plugin_params = {} - else: - try: - plugin_name = plugin_cfg['name'] - except KeyError: - raise DefinitionException( - _('Plugin specified, but no plugin name supplied ' - 'for %s') % name, self.cfg) - plugin_params = plugin_cfg.get('parameters') - if plugin_params is None: - plugin_params = {} - try: - plugin_ext = plugin_manager[plugin_name] - except KeyError: - raise DefinitionException( - _('No plugin named %(plugin)s available for ' - '%(name)s') % dict( - plugin=plugin_name, - name=name), self.cfg) - plugin_class = plugin_ext.plugin - self.plugin = plugin_class(**plugin_params) - - fields = cfg['fields'] - else: - # Simple definition "foobar: jsonpath" - fields = cfg - - if isinstance(fields, list): - # NOTE(mdragon): if not a string, we assume a list. 
- if len(fields) == 1: - fields = fields[0] - else: - fields = '|'.join('(%s)' % path for path in fields) - - if isinstance(fields, six.integer_types): - self.getter = fields - else: - try: - self.getter = self.make_getter(fields) - except Exception as e: - raise DefinitionException( - _("Parse error in JSONPath specification " - "'%(jsonpath)s' for %(name)s: %(err)s") - % dict(jsonpath=fields, name=name, err=e), self.cfg) - - def _get_path(self, match): - if match.context is not None: - for path_element in self._get_path(match.context): - yield path_element - yield str(match.path) - - def parse(self, obj, return_all_values=False): - if callable(self.getter): - values = self.getter(obj) - else: - return self.getter - - values = [match for match in values - if return_all_values or match.value is not None] - - if self.plugin is not None: - if return_all_values and not self.plugin.support_return_all_values: - raise DefinitionException("Plugin %s don't allows to " - "return multiple values" % - self.cfg["plugin"]["name"], self.cfg) - values_map = [('.'.join(self._get_path(match)), match.value) for - match in values] - values = [v for v in self.plugin.trait_values(values_map) - if v is not None] - else: - values = [match.value for match in values if match is not None] - if return_all_values: - return values - else: - return values[0] if values else None - - def make_getter(self, fields): - if fields in self.GETTERS_CACHE: - return self.GETTERS_CACHE[fields] - else: - getter = self.JSONPATH_RW_PARSER.parse(fields).find - self.GETTERS_CACHE[fields] = getter - return getter - - -def load_definitions(defaults, config_file, fallback_file=None): - """Setup a definitions from yaml config file.""" - - if not os.path.exists(config_file): - config_file = cfg.CONF.find_file(config_file) - if not config_file and fallback_file is not None: - LOG.debug("No Definitions configuration file found!" - "Using default config.") - config_file = fallback_file - - if config_file is not None: - LOG.debug("Loading definitions configuration file: %s", config_file) - - with open(config_file) as cf: - config = cf.read() - - try: - definition_cfg = yaml.safe_load(config) - except yaml.YAMLError as err: - if hasattr(err, 'problem_mark'): - mark = err.problem_mark - errmsg = (_("Invalid YAML syntax in Definitions file " - "%(file)s at line: %(line)s, column: %(column)s.") - % dict(file=config_file, - line=mark.line + 1, - column=mark.column + 1)) - else: - errmsg = (_("YAML error reading Definitions file " - "%(file)s") - % dict(file=config_file)) - LOG.error(errmsg) - raise - - else: - LOG.debug("No Definitions configuration file found!" 
- "Using default config.") - definition_cfg = defaults - - LOG.info(_LI("Definitions: %s"), definition_cfg) - return definition_cfg diff --git a/ceilometer/dispatcher/__init__.py b/ceilometer/dispatcher/__init__.py index bd8e42b8..d9cddb1f 100644 --- a/ceilometer/dispatcher/__init__.py +++ b/ceilometer/dispatcher/__init__.py @@ -16,26 +16,8 @@ import abc from oslo_config import cfg -from oslo_log import log import six -from stevedore import named -from ceilometer.i18n import _LW - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.MultiStrOpt('meter_dispatchers', - deprecated_name='dispatcher', - default=['database'], - help='Dispatchers to process metering data.'), - cfg.MultiStrOpt('event_dispatchers', - default=['database'], - deprecated_name='dispatcher', - help='Dispatchers to process event data.'), -] -cfg.CONF.register_opts(OPTS) STORAGE_OPTS = [ cfg.IntOpt('max_retries', @@ -51,59 +33,11 @@ STORAGE_OPTS = [ cfg.CONF.register_opts(STORAGE_OPTS, group='storage') -def _load_dispatcher_manager(dispatcher_type): - namespace = 'ceilometer.dispatcher.%s' % dispatcher_type - conf_name = '%s_dispatchers' % dispatcher_type - - LOG.debug('loading dispatchers from %s', namespace) - # set propagate_map_exceptions to True to enable stevedore - # to propagate exceptions. - dispatcher_manager = named.NamedExtensionManager( - namespace=namespace, - names=getattr(cfg.CONF, conf_name), - invoke_on_load=True, - invoke_args=[cfg.CONF], - propagate_map_exceptions=True) - if not list(dispatcher_manager): - LOG.warning(_LW('Failed to load any dispatchers for %s'), - namespace) - return dispatcher_manager - - -def load_dispatcher_manager(): - return (_load_dispatcher_manager('meter'), - _load_dispatcher_manager('event')) - - -class Base(object): +@six.add_metaclass(abc.ABCMeta) +class EventDispatcherBase(object): def __init__(self, conf): self.conf = conf - -@six.add_metaclass(abc.ABCMeta) -class MeterDispatcherBase(Base): - @abc.abstractmethod - def record_metering_data(self, data): - """Recording metering data interface.""" - - def verify_and_record_metering_data(self, datapoints): - """Verify metering data's signature and record valid ones.""" - if not isinstance(datapoints, list): - datapoints = [datapoints] - - valid_datapoints = [] - for datapoint in datapoints: - if utils.verify_signature(datapoint, - self.conf.publisher.telemetry_secret): - valid_datapoints.append(datapoint) - else: - LOG.warning(_LW('Message signature is invalid, discarding ' - 'it: <%r>.'), datapoint) - return self.record_metering_data(valid_datapoints) - - -@six.add_metaclass(abc.ABCMeta) -class EventDispatcherBase(Base): @abc.abstractmethod def record_events(self, events): """Record events.""" diff --git a/ceilometer/dispatcher/database.py b/ceilometer/dispatcher/database.py index 6d734d05..aa383a74 100644 --- a/ceilometer/dispatcher/database.py +++ b/ceilometer/dispatcher/database.py @@ -24,8 +24,7 @@ from ceilometer import storage LOG = log.getLogger(__name__) -class DatabaseDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): +class DatabaseDispatcher(dispatcher.EventDispatcherBase): """Dispatcher class for recording metering data into database. 
The dispatcher class which records each meter into a database configured @@ -41,59 +40,7 @@ class DatabaseDispatcher(dispatcher.MeterDispatcherBase, def __init__(self, conf): super(DatabaseDispatcher, self).__init__(conf) - - self._meter_conn = self._get_db_conn('metering', True) - self._event_conn = self._get_db_conn('event', True) - - def _get_db_conn(self, purpose, ignore_exception=False): - try: - return storage.get_connection_from_config(self.conf, purpose) - except Exception as err: - params = {"purpose": purpose, "err": err} - LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " - "re-try later: %(err)s") % params) - if not ignore_exception: - raise - - @property - def meter_conn(self): - if not self._meter_conn: - self._meter_conn = self._get_db_conn('metering') - - return self._meter_conn - - @property - def event_conn(self): - if not self._event_conn: - self._event_conn = self._get_db_conn('event') - - return self._event_conn - - def record_metering_data(self, data): - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] - - for meter in data: - LOG.debug( - 'metering data %(counter_name)s ' - 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', - {'counter_name': meter['counter_name'], - 'resource_id': meter['resource_id'], - 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), - 'counter_volume': meter['counter_volume']}) - try: - # Convert the timestamp to a datetime instance. - # Storage engines are responsible for converting - # that value to something they can store. - if meter.get('timestamp'): - ts = timeutils.parse_isotime(meter['timestamp']) - meter['timestamp'] = timeutils.normalize_time(ts) - self.meter_conn.record_metering_data(meter) - except Exception as err: - LOG.error(_LE('Failed to record metering data: %s.'), err) - # raise the exception to propagate it up in the chain. - raise + self.event_conn = storage.get_connection_from_config(self.conf) def record_events(self, events): if not isinstance(events, list): diff --git a/ceilometer/dispatcher/file.py b/ceilometer/dispatcher/file.py deleted file mode 100644 index a4da54d0..00000000 --- a/ceilometer/dispatcher/file.py +++ /dev/null @@ -1,85 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import logging.handlers - -from oslo_config import cfg - -from ceilometer import dispatcher - -OPTS = [ - cfg.StrOpt('file_path', - help='Name and the location of the file to record ' - 'meters.'), - cfg.IntOpt('max_bytes', - default=0, - help='The max size of the file.'), - cfg.IntOpt('backup_count', - default=0, - help='The max number of the files to keep.'), -] - -cfg.CONF.register_opts(OPTS, group="dispatcher_file") - - -class FileDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - """Dispatcher class for recording metering data to a file. - - The dispatcher class which logs each meter and/or event into a file - configured in ceilometer configuration file. 
An example configuration may - look like the following: - - [dispatcher_file] - file_path = /tmp/meters - - To enable this dispatcher, the following section needs to be present in - ceilometer.conf file - - [DEFAULT] - meter_dispatchers = file - event_dispatchers = file - """ - - def __init__(self, conf): - super(FileDispatcher, self).__init__(conf) - self.log = None - - # if the directory and path are configured, then log to the file - if self.conf.dispatcher_file.file_path: - dispatcher_logger = logging.Logger('dispatcher.file') - dispatcher_logger.setLevel(logging.INFO) - # create rotating file handler which logs meters - rfh = logging.handlers.RotatingFileHandler( - self.conf.dispatcher_file.file_path, - maxBytes=self.conf.dispatcher_file.max_bytes, - backupCount=self.conf.dispatcher_file.backup_count, - encoding='utf8') - - rfh.setLevel(logging.INFO) - # Only wanted the meters to be saved in the file, not the - # project root logger. - dispatcher_logger.propagate = False - dispatcher_logger.addHandler(rfh) - self.log = dispatcher_logger - - def record_metering_data(self, data): - if self.log: - self.log.info(data) - - def record_events(self, events): - if self.log: - self.log.info(events) diff --git a/ceilometer/dispatcher/gnocchi.py b/ceilometer/dispatcher/gnocchi.py deleted file mode 100644 index 4e2b011b..00000000 --- a/ceilometer/dispatcher/gnocchi.py +++ /dev/null @@ -1,469 +0,0 @@ -# -# Copyright 2014-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from collections import defaultdict -from hashlib import md5 -import itertools -import operator -import re -import threading -import uuid - -from gnocchiclient import client -from gnocchiclient import exceptions as gnocchi_exc -from gnocchiclient import utils as gnocchi_utils -from keystoneauth1 import exceptions as ka_exceptions -from keystoneauth1 import session as ka_session -from oslo_config import cfg -from oslo_log import log -from oslo_utils import fnmatch -import requests -import retrying -import six -from stevedore import extension - -from ceilometer import declarative -from ceilometer import dispatcher -from ceilometer.i18n import _, _LE, _LW -from ceilometer import keystone_client - -NAME_ENCODED = __name__.encode('utf-8') -CACHE_NAMESPACE = uuid.UUID(bytes=md5(NAME_ENCODED).digest()) -LOG = log.getLogger(__name__) - -dispatcher_opts = [ - cfg.BoolOpt('filter_service_activity', - default=True, - help='Filter out samples generated by Gnocchi ' - 'service activity'), - cfg.StrOpt('filter_project', - default='gnocchi', - help='Gnocchi project used to filter out samples ' - 'generated by Gnocchi service activity'), - cfg.StrOpt('url', - deprecated_for_removal=True, - help='URL to Gnocchi. 
default: autodetection'), - cfg.StrOpt('archive_policy', - help='The archive policy to use when the dispatcher ' - 'create a new metric.'), - cfg.StrOpt('resources_definition_file', - default='gnocchi_resources.yaml', - help=_('The Yaml file that defines mapping between samples ' - 'and gnocchi resources/metrics')), -] - -cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi") - - -def cache_key_mangler(key): - """Construct an opaque cache key.""" - if six.PY2: - key = key.encode('utf-8') - return uuid.uuid5(CACHE_NAMESPACE, key).hex - - -class ResourcesDefinition(object): - - MANDATORY_FIELDS = {'resource_type': six.string_types, - 'metrics': list} - - def __init__(self, definition_cfg, default_archive_policy, plugin_manager): - self._default_archive_policy = default_archive_policy - self.cfg = definition_cfg - - for field, field_type in self.MANDATORY_FIELDS.items(): - if field not in self.cfg: - raise declarative.ResourceDefinitionException( - _LE("Required field %s not specified") % field, self.cfg) - if not isinstance(self.cfg[field], field_type): - raise declarative.ResourceDefinitionException( - _LE("Required field %(field)s should be a %(type)s") % - {'field': field, 'type': field_type}, self.cfg) - - self._attributes = {} - for name, attr_cfg in self.cfg.get('attributes', {}).items(): - self._attributes[name] = declarative.Definition(name, attr_cfg, - plugin_manager) - - self.metrics = {} - for t in self.cfg['metrics']: - archive_policy = self.cfg.get('archive_policy', - self._default_archive_policy) - if archive_policy is None: - self.metrics[t] = {} - else: - self.metrics[t] = dict(archive_policy_name=archive_policy) - - def match(self, metric_name): - for t in self.cfg['metrics']: - if fnmatch.fnmatch(metric_name, t): - return True - return False - - def attributes(self, sample): - attrs = {} - for name, definition in self._attributes.items(): - value = definition.parse(sample) - if value is not None: - attrs[name] = value - return attrs - - -def get_gnocchiclient(conf): - requests_session = requests.session() - for scheme in requests_session.adapters.keys(): - requests_session.mount(scheme, ka_session.TCPKeepAliveAdapter( - pool_block=True)) - - session = keystone_client.get_session(requests_session=requests_session) - return client.Client('1', session, - interface=conf.service_credentials.interface, - region_name=conf.service_credentials.region_name, - endpoint_override=conf.dispatcher_gnocchi.url) - - -class LockedDefaultDict(defaultdict): - """defaultdict with lock to handle threading - - Dictionary only deletes if nothing is accessing dict and nothing is holding - lock to be deleted. If both cases are not true, it will skip delete. - """ - def __init__(self, *args, **kwargs): - self.lock = threading.Lock() - super(LockedDefaultDict, self).__init__(*args, **kwargs) - - def __getitem__(self, key): - with self.lock: - return super(LockedDefaultDict, self).__getitem__(key) - - def pop(self, key, *args): - with self.lock: - key_lock = super(LockedDefaultDict, self).__getitem__(key) - if key_lock.acquire(False): - try: - super(LockedDefaultDict, self).pop(key, *args) - finally: - key_lock.release() - - -class GnocchiDispatcher(dispatcher.MeterDispatcherBase): - """Dispatcher class for recording metering data into database. - - The dispatcher class records each meter into the gnocchi service - configured in ceilometer configuration file. 
An example configuration may - look like the following: - - [dispatcher_gnocchi] - url = http://localhost:8041 - archive_policy = low - - To enable this dispatcher, the following section needs to be present in - ceilometer.conf file - - [DEFAULT] - meter_dispatchers = gnocchi - """ - def __init__(self, conf): - super(GnocchiDispatcher, self).__init__(conf) - self.conf = conf - self.filter_service_activity = ( - conf.dispatcher_gnocchi.filter_service_activity) - self._ks_client = keystone_client.get_client() - self.resources_definition = self._load_resources_definitions(conf) - - self.cache = None - try: - import oslo_cache - oslo_cache.configure(self.conf) - # NOTE(cdent): The default cache backend is a real but - # noop backend. We don't want to use that here because - # we want to avoid the cache pathways entirely if the - # cache has not been configured explicitly. - if 'null' not in self.conf.cache.backend: - cache_region = oslo_cache.create_region() - self.cache = oslo_cache.configure_cache_region( - self.conf, cache_region) - self.cache.key_mangler = cache_key_mangler - except ImportError: - pass - except oslo_cache.exception.ConfigurationError as exc: - LOG.warning(_LW('unable to configure oslo_cache: %s') % exc) - - self._gnocchi_project_id = None - self._gnocchi_project_id_lock = threading.Lock() - self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock) - - self._gnocchi = get_gnocchiclient(conf) - # Convert retry_interval secs to msecs for retry decorator - retries = conf.storage.max_retries - - @retrying.retry(wait_fixed=conf.storage.retry_interval * 1000, - stop_max_attempt_number=(retries if retries >= 0 - else None)) - def _get_connection(): - self._gnocchi.capabilities.list() - - try: - _get_connection() - except Exception: - LOG.error(_LE('Failed to connect to Gnocchi.')) - raise - - @classmethod - def _load_resources_definitions(cls, conf): - plugin_manager = extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin') - data = declarative.load_definitions( - {}, conf.dispatcher_gnocchi.resources_definition_file) - resource_defs = [] - for resource in data.get('resources', []): - try: - resource_defs.append(ResourcesDefinition( - resource, - conf.dispatcher_gnocchi.archive_policy, plugin_manager)) - except Exception as exc: - LOG.error(_LE("Failed to load resource due to error %s") % - exc) - return resource_defs - - @property - def gnocchi_project_id(self): - if self._gnocchi_project_id is not None: - return self._gnocchi_project_id - with self._gnocchi_project_id_lock: - if self._gnocchi_project_id is None: - try: - project = self._ks_client.projects.find( - name=self.conf.dispatcher_gnocchi.filter_project) - except ka_exceptions.NotFound: - LOG.warning(_LW('gnocchi project not found in keystone,' - ' ignoring the filter_service_activity ' - 'option')) - self.filter_service_activity = False - return None - except Exception: - LOG.exception('fail to retrieve user of Gnocchi service') - raise - self._gnocchi_project_id = project.id - LOG.debug("gnocchi project found: %s", self.gnocchi_project_id) - return self._gnocchi_project_id - - def _is_swift_account_sample(self, sample): - return bool([rd for rd in self.resources_definition - if rd.cfg['resource_type'] == 'swift_account' - and rd.match(sample['counter_name'])]) - - def _is_gnocchi_activity(self, sample): - return (self.filter_service_activity and self.gnocchi_project_id and ( - # avoid anything from the user used by gnocchi - sample['project_id'] == self.gnocchi_project_id or - # avoid anything in 
the swift account used by gnocchi - (sample['resource_id'] == self.gnocchi_project_id and - self._is_swift_account_sample(sample)) - )) - - def _get_resource_definition(self, metric_name): - for rd in self.resources_definition: - if rd.match(metric_name): - return rd - - def record_metering_data(self, data): - # We may have receive only one counter on the wire - if not isinstance(data, list): - data = [data] - # NOTE(sileht): skip sample generated by gnocchi itself - data = [s for s in data if not self._is_gnocchi_activity(s)] - - # FIXME(sileht): This method bulk the processing of samples - # grouped by resource_id and metric_name but this is not - # efficient yet because the data received here doesn't often - # contains a lot of different kind of samples - # So perhaps the next step will be to pool the received data from - # message bus. - data.sort(key=lambda s: (s['resource_id'], s['counter_name'])) - - resource_grouped_samples = itertools.groupby( - data, key=operator.itemgetter('resource_id')) - - gnocchi_data = {} - measures = {} - stats = dict(measures=0, resources=0, metrics=0) - for resource_id, samples_of_resource in resource_grouped_samples: - stats['resources'] += 1 - metric_grouped_samples = itertools.groupby( - list(samples_of_resource), - key=operator.itemgetter('counter_name')) - - # NOTE(sileht): We convert resource id to Gnocchi format - # because batch_resources_metrics_measures exception - # returns this id and not the ceilometer one - gnocchi_id = gnocchi_utils.encode_resource_id(resource_id) - res_info = gnocchi_data[gnocchi_id] = {} - for metric_name, samples in metric_grouped_samples: - stats['metrics'] += 1 - - samples = list(samples) - rd = self._get_resource_definition(metric_name) - if rd is None: - LOG.warning(_LW("metric %s is not handled by Gnocchi") % - metric_name) - continue - if rd.cfg.get("ignore"): - continue - - res_info['resource_type'] = rd.cfg['resource_type'] - res_info.setdefault("resource", {}).update({ - "id": resource_id, - "user_id": samples[0]['user_id'], - "project_id": samples[0]['project_id'], - "metrics": rd.metrics, - }) - - for sample in samples: - res_info.setdefault("resource_extra", {}).update( - rd.attributes(sample)) - m = measures.setdefault(gnocchi_id, {}).setdefault( - metric_name, []) - m.append({'timestamp': sample['timestamp'], - 'value': sample['counter_volume']}) - unit = sample['counter_unit'] - metric = sample['counter_name'] - res_info['resource']['metrics'][metric]['unit'] = unit - - stats['measures'] += len(measures[gnocchi_id][metric_name]) - res_info["resource"].update(res_info["resource_extra"]) - - try: - self.batch_measures(measures, gnocchi_data, stats) - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - except Exception as e: - LOG.error(six.text_type(e), exc_info=True) - - for gnocchi_id, info in gnocchi_data.items(): - resource = info["resource"] - resource_type = info["resource_type"] - resource_extra = info["resource_extra"] - if not resource_extra: - continue - try: - self._if_not_cached("update", resource_type, resource, - self._update_resource, resource_extra) - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - except Exception as e: - LOG.error(six.text_type(e), exc_info=True) - - RE_UNKNOW_METRICS = re.compile("Unknown metrics: (.*) \(HTTP 400\)") - RE_UNKNOW_METRICS_LIST = re.compile("([^/ ,]*)/([^,]*)") - - def batch_measures(self, measures, resource_infos, stats): - # NOTE(sileht): We don't care about error here, we want - # resources metadata always 
been updated - try: - self._gnocchi.metric.batch_resources_metrics_measures(measures) - except gnocchi_exc.BadRequest as e: - m = self.RE_UNKNOW_METRICS.match(six.text_type(e)) - if m is None: - raise - - # NOTE(sileht): Create all missing resources and metrics - metric_list = self.RE_UNKNOW_METRICS_LIST.findall(m.group(1)) - gnocchi_ids_freshly_handled = set() - for gnocchi_id, metric_name in metric_list: - if gnocchi_id in gnocchi_ids_freshly_handled: - continue - resource = resource_infos[gnocchi_id]['resource'] - resource_type = resource_infos[gnocchi_id]['resource_type'] - try: - self._if_not_cached("create", resource_type, resource, - self._create_resource) - except gnocchi_exc.ResourceAlreadyExists: - metric = {'resource_id': resource['id'], - 'name': metric_name} - metric.update(resource["metrics"][metric_name]) - try: - self._gnocchi.metric.create(metric) - except gnocchi_exc.NamedMetricAlreadyExists: - # NOTE(sileht): metric created in the meantime - pass - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - # We cannot post measures for this metric - del measures[gnocchi_id][metric_name] - if not measures[gnocchi_id]: - del measures[gnocchi_id] - except gnocchi_exc.ClientException as e: - LOG.error(six.text_type(e)) - # We cannot post measures for this resource - del measures[gnocchi_id] - gnocchi_ids_freshly_handled.add(gnocchi_id) - else: - gnocchi_ids_freshly_handled.add(gnocchi_id) - - # NOTE(sileht): we have created missing resources/metrics, - # now retry to post measures - self._gnocchi.metric.batch_resources_metrics_measures(measures) - - # FIXME(sileht): take care of measures removed in stats - LOG.debug("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", stats) - - def _create_resource(self, resource_type, resource): - self._gnocchi.resource.create(resource_type, resource) - LOG.debug('Resource %s created', resource["id"]) - - def _update_resource(self, resource_type, resource, resource_extra): - self._gnocchi.resource.update(resource_type, - resource["id"], - resource_extra) - LOG.debug('Resource %s updated', resource["id"]) - - def _if_not_cached(self, operation, resource_type, resource, method, - *args, **kwargs): - if self.cache: - cache_key = resource['id'] - attribute_hash = self._check_resource_cache(cache_key, resource) - hit = False - if attribute_hash: - with self._gnocchi_resource_lock[cache_key]: - # NOTE(luogangyi): there is a possibility that the - # resource was already built in cache by another - # ceilometer-collector when we get the lock here. 
- attribute_hash = self._check_resource_cache(cache_key, - resource) - if attribute_hash: - method(resource_type, resource, *args, **kwargs) - self.cache.set(cache_key, attribute_hash) - else: - hit = True - LOG.debug('Resource cache recheck hit for ' - '%s %s', operation, cache_key) - self._gnocchi_resource_lock.pop(cache_key, None) - else: - hit = True - LOG.debug('Resource cache hit for %s %s', operation, cache_key) - if hit and operation == "create": - raise gnocchi_exc.ResourceAlreadyExists() - else: - method(resource_type, resource, *args, **kwargs) - - def _check_resource_cache(self, key, resource_data): - cached_hash = self.cache.get(key) - attribute_hash = hash(frozenset(filter(lambda x: x[0] != "metrics", - resource_data.items()))) - if not cached_hash or cached_hash != attribute_hash: - return attribute_hash - else: - return None diff --git a/ceilometer/dispatcher/http.py b/ceilometer/dispatcher/http.py deleted file mode 100644 index b69c473d..00000000 --- a/ceilometer/dispatcher/http.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from oslo_config import cfg -from oslo_log import log -import requests - -from ceilometer import dispatcher -from ceilometer.i18n import _LE - -LOG = log.getLogger(__name__) - -http_dispatcher_opts = [ - cfg.StrOpt('target', - default='', - help='The target where the http request will be sent. ' - 'If this is not set, no data will be posted. For ' - 'example: target = http://hostname:1234/path'), - cfg.StrOpt('event_target', - help='The target where event data will be sent. If this ' - 'is not set, it will default to the same as the ' - 'sample target.'), - cfg.IntOpt('timeout', - default=5, - help='The maximum time in seconds to wait before a ' - 'request times out.'), -] - -cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http") - - -class HttpDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - """Dispatcher class for posting metering/event data into an HTTP target. - - To enable this dispatcher, the following option needs to be present in - the ceilometer.conf file:: - - [DEFAULT] - meter_dispatchers = http - event_dispatchers = http - - Dispatcher specific options can be added as follows:: - - [dispatcher_http] - target = www.example.com - event_target = www.example.com - timeout = 2 - """ - - def __init__(self, conf): - super(HttpDispatcher, self).__init__(conf) - self.headers = {'Content-type': 'application/json'} - self.timeout = self.conf.dispatcher_http.timeout - self.target = self.conf.dispatcher_http.target - self.event_target = (self.conf.dispatcher_http.event_target or - self.target) - - def record_metering_data(self, data): - if self.target == '': - # if the target was not set, do not do anything - LOG.error(_LE('Dispatcher target was not set, no meter will ' - 'be posted. Set the target in the ceilometer.conf ' - 'file.')) - return - - # We may have received only one counter on the wire - if not isinstance(data, list): - data = [data] - - for meter in data: - LOG.debug( - 'metering data %(counter_name)s ' - 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s', - {'counter_name': meter['counter_name'], - 'resource_id': meter['resource_id'], - 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'), - 'counter_volume': meter['counter_volume']}) - try: - # Every meter should be posted to the target - res = requests.post(self.target, - data=json.dumps(meter), - headers=self.headers, - timeout=self.timeout) - LOG.debug('Message posting finished with status code ' - '%d.', res.status_code) - except Exception as err: - LOG.exception(_LE('Failed to record metering data: %s.'), err) - - def record_events(self, events): - if not isinstance(events, list): - events = [events] - - for event in events: - res = None - try: - res = requests.post(self.event_target, data=event, - headers=self.headers, - timeout=self.timeout) - res.raise_for_status() - except Exception: - error_code = res.status_code if res else 'unknown' - LOG.exception(_LE('Status Code: %(code)s. Failed to ' - 'dispatch event: %(event)s'), - {'code': error_code, 'event': event}) diff --git a/ceilometer/energy/__init__.py b/ceilometer/energy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/energy/kwapi.py b/ceilometer/energy/kwapi.py deleted file mode 100644 index 9bb8caa8..00000000 --- a/ceilometer/energy/kwapi.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
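Stripped of its logging detail, the HTTP dispatcher removed above reduces to serializing each meter to JSON and POSTing it to the configured target, with failures logged rather than aborting the batch. A minimal self-contained sketch of that flow (the endpoint URL and meter payload are hypothetical, not from this patch):

import json
import requests

def post_meter(target, meter, timeout=5):
    # One POST per meter with a JSON body, mirroring record_metering_data
    # above; a failure is reported but does not stop remaining meters.
    try:
        res = requests.post(target, data=json.dumps(meter),
                            headers={'Content-type': 'application/json'},
                            timeout=timeout)
        return res.status_code
    except requests.RequestException as err:
        print('Failed to record metering data: %s' % err)
        return None

# Hypothetical meter payload and target, for illustration only.
meter = {'counter_name': 'cpu_util', 'counter_volume': 0.5,
         'resource_id': 'instance-0001'}
post_meter('http://localhost:1234/metering', meter)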
- -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import requests -import six - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - - -LOG = log.getLogger(__name__) - -SERVICE_OPTS = [ - cfg.StrOpt('kwapi', - default='energy', - help='Kwapi service type.'), -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') - - -class KwapiClient(object): - """Kwapi API client.""" - - def __init__(self, url, token=None): - """Initializes client.""" - self.url = url - self.token = token - - def iter_probes(self): - """Yields a dict describing each probe.""" - probes_url = self.url + '/probes/' - headers = {} - if self.token is not None: - headers = {'X-Auth-Token': self.token} - timeout = cfg.CONF.http_timeout - request = requests.get(probes_url, headers=headers, timeout=timeout) - message = request.json() - probes = message['probes'] - for key, value in six.iteritems(probes): - probe_dict = value - probe_dict['id'] = key - yield probe_dict - - -class _Base(plugin_base.PollsterBase): - """Base class for the Kwapi pollster, derived from PollsterBase.""" - - @property - def default_discovery(self): - return 'endpoint:%s' % cfg.CONF.service_types.kwapi - - @staticmethod - def get_kwapi_client(ksclient, endpoint): - """Returns a KwapiClient configured with the proper URL and token.""" - return KwapiClient(endpoint, keystone_client.get_auth_token(ksclient)) - - CACHE_KEY_PROBE = 'kwapi.probes' - - def _iter_probes(self, ksclient, cache, endpoint): - """Iterate over all probes.""" - key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE) - if key not in cache: - cache[key] = self._get_probes(ksclient, endpoint) - return iter(cache[key]) - - def _get_probes(self, ksclient, endpoint): - try: - client = self.get_kwapi_client(ksclient, endpoint) - except exceptions.EndpointNotFound: - LOG.debug("Kwapi endpoint not found") - return [] - return list(client.iter_probes()) - - -class EnergyPollster(_Base): - """Measures energy consumption.""" - def get_samples(self, manager, cache, resources): - """Returns all samples.""" - for endpoint in resources: - for probe in self._iter_probes(manager.keystone, cache, endpoint): - yield sample.Sample( - name='energy', - type=sample.TYPE_CUMULATIVE, - unit='kWh', - volume=probe['kwh'], - user_id=None, - project_id=None, - resource_id=probe['id'], - resource_metadata={} - ) - - -class PowerPollster(_Base): - """Measures power consumption.""" - def get_samples(self, manager, cache, resources): - """Returns all samples.""" - for endpoint in resources: - for probe in self._iter_probes(manager.keystone, cache, endpoint): - yield sample.Sample( - name='power', - type=sample.TYPE_GAUGE, - unit='W', - volume=probe['w'], - user_id=None, - project_id=None, - resource_id=probe['id'], - resource_metadata={} - ) diff --git a/ceilometer/event/converter.py b/ceilometer/event/converter.py deleted file mode 100644 index 6806909c..00000000 --- a/ceilometer/event/converter.py +++ /dev/null @@ -1,294 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import fnmatch -from oslo_utils import timeutils -import six - -from ceilometer import declarative -from ceilometer.event.storage import models -from ceilometer.i18n import _ - -OPTS = [ - cfg.StrOpt('definitions_cfg_file', - default="event_definitions.yaml", - help="Configuration file for event definitions." - ), - cfg.BoolOpt('drop_unmatched_notifications', - default=False, - help='Drop notifications if no event definition matches. ' - '(Otherwise, we convert them with just the default traits)'), - cfg.MultiStrOpt('store_raw', - default=[], - help='Store the raw notification for select priority ' - 'levels (info and/or error). By default, raw details are ' - 'not captured.') -] - -cfg.CONF.register_opts(OPTS, group='event') - -LOG = log.getLogger(__name__) - - -class TraitDefinition(declarative.Definition): - def __init__(self, name, trait_cfg, plugin_manager): - super(TraitDefinition, self).__init__(name, trait_cfg, plugin_manager) - type_name = (trait_cfg.get('type', 'text') - if isinstance(trait_cfg, dict) else 'text') - self.trait_type = models.Trait.get_type_by_name(type_name) - if self.trait_type is None: - raise declarative.EventDefinitionException( - _("Invalid trait type '%(type)s' for trait %(trait)s") - % dict(type=type_name, trait=name), self.cfg) - - def to_trait(self, notification_body): - value = self.parse(notification_body) - if value is None: - return None - - # NOTE(mdragon): some openstack projects (mostly Nova) emit '' - # for null fields for things like dates. 
- if self.trait_type != models.Trait.TEXT_TYPE and value == '': - return None - - value = models.Trait.convert_value(self.trait_type, value) - return models.Trait(self.name, self.trait_type, value) - - -class EventDefinition(object): - - DEFAULT_TRAITS = dict( - service=dict(type='text', fields='publisher_id'), - request_id=dict(type='text', fields='_context_request_id'), - project_id=dict(type='text', fields=['payload.tenant_id', - '_context_tenant']), - user_id=dict(type='text', fields=['payload.user_id', - '_context_user_id']), - # TODO(dikonoor): tenant_id is old terminology and should - # be deprecated - tenant_id=dict(type='text', fields=['payload.tenant_id', - '_context_tenant']), - ) - - def __init__(self, definition_cfg, trait_plugin_mgr): - self._included_types = [] - self._excluded_types = [] - self.traits = dict() - self.cfg = definition_cfg - self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw] - - try: - event_type = definition_cfg['event_type'] - traits = definition_cfg['traits'] - except KeyError as err: - raise declarative.EventDefinitionException( - _("Required field %s not specified") % err.args[0], self.cfg) - - if isinstance(event_type, six.string_types): - event_type = [event_type] - - for t in event_type: - if t.startswith('!'): - self._excluded_types.append(t[1:]) - else: - self._included_types.append(t) - - if self._excluded_types and not self._included_types: - self._included_types.append('*') - - for trait_name in self.DEFAULT_TRAITS: - self.traits[trait_name] = TraitDefinition( - trait_name, - self.DEFAULT_TRAITS[trait_name], - trait_plugin_mgr) - for trait_name in traits: - self.traits[trait_name] = TraitDefinition( - trait_name, - traits[trait_name], - trait_plugin_mgr) - - def included_type(self, event_type): - for t in self._included_types: - if fnmatch.fnmatch(event_type, t): - return True - return False - - def excluded_type(self, event_type): - for t in self._excluded_types: - if fnmatch.fnmatch(event_type, t): - return True - return False - - def match_type(self, event_type): - return (self.included_type(event_type) - and not self.excluded_type(event_type)) - - @property - def is_catchall(self): - return '*' in self._included_types and not self._excluded_types - - @staticmethod - def _extract_when(body): - """Extract the generated datetime from the notification.""" - # NOTE: I am keeping the logic the same as it was in the collector. - # However, *ALL* notifications should have a 'timestamp' field; it's - # part of the notification envelope spec. If this was put here because - # some openstack project is generating notifications without a - # timestamp, then that needs to be filed as a bug with the offending - # project (mdragon) - when = body.get('timestamp', body.get('_context_timestamp')) - if when: - return timeutils.normalize_time(timeutils.parse_isotime(when)) - - return timeutils.utcnow() - - def to_event(self, notification_body): - event_type = notification_body['event_type'] - message_id = notification_body['message_id'] - when = self._extract_when(notification_body) - - traits = (self.traits[t].to_trait(notification_body) - for t in self.traits) - # Only accept traits with non-None values ...
- traits = [trait for trait in traits if trait is not None] - raw = (notification_body - if notification_body.get('priority') in self.raw_levels else {}) - event = models.Event(message_id, event_type, when, traits, raw) - return event - - -class NotificationEventsConverter(object): - """Notification Event Converter - - The NotificationEventsConverter handles the conversion of Notifications - from openstack systems into Ceilometer Events. - - The conversion is handled according to event definitions in a config file. - - The config is a list of event definitions. Order is significant: a - notification will be processed according to the LAST definition that - matches its event_type. (We use the last matching definition because that - allows you to use YAML merge syntax in the definitions file.) - Each definition is a dictionary with the following keys (all are - required): - - - event_type: this is a list of notification event_types this definition - will handle. These can be wildcarded with unix shell glob (not regex!) - wildcards. - An exclusion listing (starting with a '!') will exclude any types listed - from matching. If ONLY exclusions are listed, the definition will match - anything not matching the exclusions. - This item can also be a string, which will be taken as equivalent to a - one-item list. - - Examples: - - * ['compute.instance.exists'] will only match - compute.instance.exists notifications - * "compute.instance.exists" Same as above. - * ["image.create", "image.delete"] will match - image.create and image.delete, but not anything else. - * "compute.instance.*" will match - compute.instance.create.start but not image.upload - * ['*.start','*.end', '!scheduler.*'] will match - compute.instance.create.start, and image.delete.end, - but NOT compute.instance.exists or - scheduler.run_instance.start - * '!image.*' matches any notification except image - notifications. - * ['*', '!image.*'] same as above. - - - traits: (dict) The keys are trait names, the values are the trait - definitions. Each trait definition is a dictionary with the following - keys: - - - type (optional): The data type for this trait. (as a string) - Valid options are: 'text', 'int', 'float' and 'datetime'; defaults to - 'text' if not specified. - - fields: a path specification for the field(s) in the notification you - wish to extract. The paths can be specified with a dot syntax - (e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is - also supported. - In either case, if the key for the field you are looking for contains - special characters, like '.', it will need to be quoted (with double - or single quotes) like so:: - - "payload.image_meta.'org.openstack__1__architecture'" - - The syntax used for the field specification is a variant of JSONPath, - and is fairly flexible. - (see: https://github.com/kennknowles/python-jsonpath-rw for more info) - Specifications can be written to match multiple possible fields; the - value for the trait will be derived from the matching fields that - exist and have non-null (i.e. not None) values in the - notification. - By default the value will be the first such field. (plugins can alter - that, if they wish) - - This configuration value is normally a string; for convenience, it can - be specified as a list of specifications, which will be OR'ed together - (a union query in jsonpath terms) - - plugin (optional): (dictionary) with the following keys: - - - name: (string) name of a plugin to load - - parameters: (optional) Dictionary of keyword args to pass - to the plugin on initialization. See documentation on each plugin to - see what arguments it accepts. - - For convenience, this value can also be specified as a string, which is - interpreted as a plugin name, which will be loaded with no parameters. - - """ - - def __init__(self, events_config, trait_plugin_mgr, add_catchall=True): - self.definitions = [ - EventDefinition(event_def, trait_plugin_mgr) - for event_def in reversed(events_config)] - if add_catchall and not any(d.is_catchall for d in self.definitions): - event_def = dict(event_type='*', traits={}) - self.definitions.append(EventDefinition(event_def, - trait_plugin_mgr)) - - def to_event(self, notification_body): - event_type = notification_body['event_type'] - message_id = notification_body['message_id'] - edef = None - for d in self.definitions: - if d.match_type(event_type): - edef = d - break - - if edef is None: - msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)') - % dict(type=event_type, msgid=message_id)) - if cfg.CONF.event.drop_unmatched_notifications: - LOG.debug(msg) - else: - # If drop_unmatched_notifications is False, this should - # never happen. (mdragon) - LOG.error(msg) - return None - - return edef.to_event(notification_body) - - -def setup_events(trait_plugin_mgr): - """Setup the event definitions from yaml config file.""" - return NotificationEventsConverter( - declarative.load_definitions([], cfg.CONF.event.definitions_cfg_file), - trait_plugin_mgr, - add_catchall=not cfg.CONF.event.drop_unmatched_notifications) diff --git a/ceilometer/event/endpoint.py b/ceilometer/event/endpoint.py deleted file mode 100644 index bd78bc91..00000000 --- a/ceilometer/event/endpoint.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2012-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from stevedore import extension - -from ceilometer.event import converter as event_converter -from ceilometer.i18n import _LE -from ceilometer import messaging - -LOG = log.getLogger(__name__) - - -class EventsNotificationEndpoint(object): - def __init__(self, manager): - super(EventsNotificationEndpoint, self).__init__() - LOG.debug('Loading event definitions') - self.event_converter = event_converter.setup_events( - extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin')) - self.manager = manager - - def info(self, notifications): - """Convert message at info level to Ceilometer Event.
- - :param notifications: list of notifications - """ - return self.process_notification('info', notifications) - - def error(self, notifications): - """Convert message at error level to Ceilometer Event. - - :param notifications: list of notifications - """ - return self.process_notification('error', notifications) - - def process_notification(self, priority, notifications): - for notification in notifications: - # NOTE: the rpc layer currently rips out the notification - # delivery_info, which is critical to determining the - # source of the notification. This will have to get added back - # later. - notification = messaging.convert_to_old_notification_format( - priority, notification) - try: - event = self.event_converter.to_event(notification) - if event is not None: - with self.manager.publisher() as p: - p(event) - except Exception: - if not cfg.CONF.notification.ack_on_event_error: - return oslo_messaging.NotificationResult.REQUEUE - LOG.error(_LE('Failed to process a notification'), exc_info=True) - return oslo_messaging.NotificationResult.HANDLED diff --git a/ceilometer/event/storage/impl_mongodb.py b/ceilometer/event/storage/impl_mongodb.py index 892ce4bf..11a7edaa 100644 --- a/ceilometer/event/storage/impl_mongodb.py +++ b/ceilometer/event/storage/impl_mongodb.py @@ -18,7 +18,6 @@ import pymongo from ceilometer.event.storage import pymongo_base from ceilometer import storage -from ceilometer.storage import impl_mongodb from ceilometer.storage.mongo import utils as pymongo_utils LOG = log.getLogger(__name__) @@ -52,6 +51,31 @@ class Connection(pymongo_base.Connection): # needed. self.upgrade() + @staticmethod + def update_ttl(ttl, ttl_index_name, index_field, coll): + """Update or create time_to_live indexes. + + :param ttl: time to live in seconds. + :param ttl_index_name: name of the index we want to update or create. + :param index_field: field whose index needs to be updated. + :param coll: collection whose indexes need to be updated.
+ """ + indexes = coll.index_information() + if ttl <= 0: + if ttl_index_name in indexes: + coll.drop_index(ttl_index_name) + return + + if ttl_index_name in indexes: + return coll.database.command( + 'collMod', coll.name, + index={'keyPattern': {index_field: pymongo.ASCENDING}, + 'expireAfterSeconds': ttl}) + + coll.create_index([(index_field, pymongo.ASCENDING)], + expireAfterSeconds=ttl, + name=ttl_index_name) + def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): @@ -65,8 +89,7 @@ class Connection(pymongo_base.Connection): name='event_type_idx' ) ttl = cfg.CONF.database.event_time_to_live - impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp', - self.db.event) + self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event) def clear(self): self.conn.drop_database(self.db.name) diff --git a/ceilometer/event/storage/impl_sqlalchemy.py b/ceilometer/event/storage/impl_sqlalchemy.py index b53e0f18..0326c1c6 100644 --- a/ceilometer/event/storage/impl_sqlalchemy.py +++ b/ceilometer/event/storage/impl_sqlalchemy.py @@ -15,7 +15,6 @@ from __future__ import absolute_import import datetime -import os from oslo_config import cfg from oslo_db import exception as dbexc @@ -137,12 +136,8 @@ class Connection(base.Connection): self._engine_facade = db_session.EngineFacade(url, **options) def upgrade(self): - # NOTE(gordc): to minimise memory, only import migration when needed - from oslo_db.sqlalchemy import migration - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - '..', '..', 'storage', 'sqlalchemy', - 'migrate_repo') - migration.db_sync(self._engine_facade.get_engine(), path) + engine = self._engine_facade.get_engine() + models.Base.metadata.create_all(engine) def clear(self): engine = self._engine_facade.get_engine() diff --git a/ceilometer/event/trait_plugins.py b/ceilometer/event/trait_plugins.py deleted file mode 100644 index a4b5fa1d..00000000 --- a/ceilometer/event/trait_plugins.py +++ /dev/null @@ -1,230 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from debtcollector import moves -from oslo_log import log -from oslo_utils import timeutils -import six - -from ceilometer.i18n import _LW - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class TraitPluginBase(object): - """Base class for plugins. - - It converts notification fields to Trait values. - """ - - support_return_all_values = False - """If True, an exception will be raised if the user expect - the plugin to return one trait per match_list, but - the plugin doesn't allow/support that. - """ - - def __init__(self, **kw): - """Setup the trait plugin. - - For each Trait definition a plugin is used on in a conversion - definition, a new instance of the plugin will be created, and - initialized with the parameters (if any) specified in the - config file. - - :param kw: the parameters specified in the event definitions file. 
- - """ - super(TraitPluginBase, self).__init__() - - @moves.moved_method('trait_values', version=6.0, removal_version="?") - def trait_value(self, match_list): - pass - - def trait_values(self, match_list): - """Convert a set of fields to one or multiple Trait values. - - This method is called each time a trait is attempted to be extracted - from a notification. It will be called *even if* no matching fields - are found in the notification (in that case, the match_list will be - empty). If this method returns None, the trait *will not* be added to - the event. Any other value returned by this method will be used as - the value for the trait. Values returned will be coerced to the - appropriate type for the trait. - - :param match_list: A list (may be empty if no matches) of *tuples*. - Each tuple is (field_path, value) where field_path is the jsonpath - for that specific field. - - Example:: - - trait's fields definition: ['payload.foobar', - 'payload.baz', - 'payload.thing.*'] - notification body: - { - 'message_id': '12345', - 'publisher': 'someservice.host', - 'payload': { - 'foobar': 'test', - 'thing': { - 'bar': 12, - 'boing': 13, - } - } - } - match_list will be: [('payload.foobar','test'), - ('payload.thing.bar',12), - ('payload.thing.boing',13)] - - Here is a plugin that emulates the default (no plugin) behavior: - - .. code-block:: python - - class DefaultPlugin(TraitPluginBase): - "Plugin that returns the first field value." - - def __init__(self, **kw): - super(DefaultPlugin, self).__init__() - - def trait_value(self, match_list): - if not match_list: - return None - return [ match[1] for match in match_list] - """ - - # For backwards compatibility for the renamed method. - return [self.trait_value(match_list)] - - -class SplitterTraitPlugin(TraitPluginBase): - """Plugin that splits a piece off of a string value.""" - - support_return_all_values = True - - def __init__(self, separator=".", segment=0, max_split=None, **kw): - """Setup how do split the field. - - :param separator: String to split on. default "." - :param segment: Which segment to return. (int) default 0 - :param max_split: Limit number of splits. Default: None (no limit) - """ - LOG.warning(_LW('split plugin is deprecated, ' - 'add ".`split(%(sep)s, %(segment)d, ' - '%(max_split)d)`" to your jsonpath instead') % - dict(sep=separator, - segment=segment, - max_split=(-1 if max_split is None - else max_split))) - - self.separator = separator - self.segment = segment - self.max_split = max_split - super(SplitterTraitPlugin, self).__init__(**kw) - - def trait_values(self, match_list): - return [self._trait_value(match) - for match in match_list] - - def _trait_value(self, match): - value = six.text_type(match[1]) - if self.max_split is not None: - values = value.split(self.separator, self.max_split) - else: - values = value.split(self.separator) - try: - return values[self.segment] - except IndexError: - return None - - -class BitfieldTraitPlugin(TraitPluginBase): - """Plugin to set flags on a bitfield.""" - def __init__(self, initial_bitfield=0, flags=None, **kw): - """Setup bitfield trait. - - :param initial_bitfield: (int) initial value for the bitfield - Flags that are set will be OR'ed with this. - :param flags: List of dictionaries defining bitflags to set depending - on data in the notification. Each one has the following - keys: - path: jsonpath of field to match. - bit: (int) number of bit to set (lsb is bit 0) - value: set bit if corresponding field's value - matches this. 
If value is not provided, - bit will be set if the field exists (and - is non-null), regardless of its value. - - """ - self.initial_bitfield = initial_bitfield - if flags is None: - flags = [] - self.flags = flags - super(BitfieldTraitPlugin, self).__init__(**kw) - - def trait_values(self, match_list): - matches = dict(match_list) - bitfield = self.initial_bitfield - for flagdef in self.flags: - path = flagdef['path'] - bit = 2 ** int(flagdef['bit']) - if path in matches: - if 'value' in flagdef: - if matches[path] == flagdef['value']: - bitfield |= bit - else: - bitfield |= bit - return [bitfield] - - -class TimedeltaPluginMissedFields(Exception): - def __init__(self): - msg = ('Two timestamp fields are required to use the Timedelta ' - 'plugin.') - super(TimedeltaPluginMissedFields, self).__init__(msg) - - -class TimedeltaPlugin(TraitPluginBase): - """Compute a timedelta meter volume from two timestamp fields. - - Example:: - - trait's fields definition: ['payload.created_at', - 'payload.launched_at'] - the value is created as the total seconds between the - 'launched_at' and 'created_at' timestamps. - """ - # TODO(idegtiarov): refactor code to have meter_plugins separate from - # trait_plugins - - def trait_value(self, match_list): - if len(match_list) != 2: - LOG.warning(_LW('The Timedelta plugin requires two timestamp ' - 'fields to create a timedelta value.')) - return - start, end = match_list - try: - start_time = timeutils.parse_isotime(start[1]) - end_time = timeutils.parse_isotime(end[1]) - except Exception as err: - LOG.warning(_LW('Failed to parse date from set fields, both ' - 'fields %(start)s and %(end)s must be datetime: ' - '%(err)s') % - dict(start=start[0], end=end[0], err=err) - ) - return - return abs((end_time - start_time).total_seconds()) diff --git a/ceilometer/exchange_control.py b/ceilometer/exchange_control.py deleted file mode 100644 index 717cdc12..00000000 --- a/ceilometer/exchange_control.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
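To make the bitfield semantics above concrete, here is a small self-contained sketch of the OR-ing logic in BitfieldTraitPlugin.trait_values; the flag definitions and matched notification fields are illustrative only, not taken from any shipped config:

# Illustrative flag definitions (hypothetical jsonpath values).
flags = [
    {'path': 'payload.deleted', 'bit': 0},                   # set when field present
    {'path': 'payload.state', 'bit': 1, 'value': 'active'},  # set when value matches
]
match_list = [('payload.deleted', 'true'), ('payload.state', 'active')]

matches = dict(match_list)
bitfield = 0  # initial_bitfield
for flagdef in flags:
    bit = 2 ** int(flagdef['bit'])
    if flagdef['path'] in matches:
        if 'value' in flagdef:
            if matches[flagdef['path']] == flagdef['value']:
                bitfield |= bit
        else:
            bitfield |= bit
print(bitfield)  # 3: bit 0 (field present) and bit 1 (value matched) are both set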
- -from oslo_config import cfg - -EXCHANGE_OPTS = [ - cfg.StrOpt('heat_control_exchange', - default='heat', - help="Exchange name for Heat notifications."), - cfg.StrOpt('glance_control_exchange', - default='glance', - help="Exchange name for Glance notifications."), - cfg.StrOpt('keystone_control_exchange', - default='keystone', - help="Exchange name for Keystone notifications."), - cfg.StrOpt('cinder_control_exchange', - default='cinder', - help="Exchange name for Cinder notifications."), - cfg.StrOpt('sahara_control_exchange', - default='sahara', - help="Exchange name for Data Processing notifications."), - cfg.StrOpt('swift_control_exchange', - default='swift', - help="Exchange name for Swift notifications."), - cfg.StrOpt('magnum_control_exchange', - default='magnum', - help="Exchange name for Magnum notifications."), - cfg.StrOpt('trove_control_exchange', - default='trove', - help="Exchange name for DBaaS notifications."), - cfg.StrOpt('zaqar_control_exchange', - default='zaqar', - help="Exchange name for Messaging service notifications."), - cfg.StrOpt('dns_control_exchange', - default='central', - help="Exchange name for DNS service notifications."), -] diff --git a/ceilometer/hardware/__init__.py b/ceilometer/hardware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/hardware/discovery.py b/ceilometer/hardware/discovery.py deleted file mode 100644 index b15896c7..00000000 --- a/ceilometer/hardware/discovery.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- encoding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer import nova_client - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('url_scheme', - default='snmp://', - help='URL scheme to use for hardware nodes.'), - cfg.StrOpt('readonly_user_name', - default='ro_snmp_user', - help='SNMPd user name of all nodes running in the cloud.'), - cfg.StrOpt('readonly_user_password', - default='password', - help='SNMPd password of all the nodes running in the cloud.', - secret=True), -] -cfg.CONF.register_opts(OPTS, group='hardware') - - -class NodesDiscoveryTripleO(plugin_base.DiscoveryBase): - def __init__(self): - super(NodesDiscoveryTripleO, self).__init__() - self.nova_cli = nova_client.Client() - self.last_run = None - self.instances = {} - - @staticmethod - def _address(instance, field): - return instance.addresses['ctlplane'][0].get(field) - - def discover(self, manager, param=None): - """Discover resources to monitor. - - instance_get_all will return all instances if last_run is None, - and will return only the instances changed since the last_run time. - """ - try: - instances = self.nova_cli.instance_get_all(self.last_run) - except Exception: - # NOTE(zqfan): instance_get_all is wrapped and will log exception - # when there is any error. There is no need to raise it again and - # print it one more time.
- return [] - - for instance in instances: - if getattr(instance, 'OS-EXT-STS:vm_state', None) in ['deleted', - 'error']: - self.instances.pop(instance.id, None) - else: - self.instances[instance.id] = instance - self.last_run = timeutils.utcnow(True).isoformat() - - resources = [] - for instance in self.instances.values(): - try: - ip_address = self._address(instance, 'addr') - final_address = ( - cfg.CONF.hardware.url_scheme + - cfg.CONF.hardware.readonly_user_name + ':' + - cfg.CONF.hardware.readonly_user_password + '@' + - ip_address) - - resource = { - 'resource_id': instance.id, - 'resource_url': final_address, - 'mac_addr': self._address(instance, - 'OS-EXT-IPS-MAC:mac_addr'), - 'image_id': instance.image['id'], - 'flavor_id': instance.flavor['id'] - } - - resources.append(resource) - except KeyError: - LOG.error(_("Couldn't obtain IP address of " - "instance %s") % instance.id) - - return resources diff --git a/ceilometer/hardware/inspector/__init__.py b/ceilometer/hardware/inspector/__init__.py deleted file mode 100644 index 7e83d028..00000000 --- a/ceilometer/hardware/inspector/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from stevedore import driver - - -def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'): - """Get inspector driver and load it. - - :param parsed_url: urlparse.SplitResult object for the inspector - :param namespace: Namespace to use to look for drivers. - """ - loaded_driver = driver.DriverManager(namespace, parsed_url.scheme) - return loaded_driver.driver() diff --git a/ceilometer/hardware/inspector/base.py b/ceilometer/hardware/inspector/base.py deleted file mode 100644 index 9963085a..00000000 --- a/ceilometer/hardware/inspector/base.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2014 ZHAW SoE -# -# Authors: Lucas Graf -# Toni Zehnder -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector abstraction for read-only access to hardware components""" - -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Inspector(object): - @abc.abstractmethod - def inspect_generic(self, host, cache, extra_metadata, param): - """A generic inspect function. 
- - :param host: the target host - :param cache: cache passed from the pollster - :param extra_metadata: extra dict to be used as metadata - :param param: a dict of inspector specific param - :return: an iterator of (value, metadata, extra) - :return value: the sample value - :return metadata: dict to construct sample's metadata - :return extra: dict of extra metadata to help constructing sample - """ - - def prepare_params(self, param): - """Parse the params to a format which the inspector itself recognizes. - - :param param: inspector params from meter definition file - :return: a dict of param which the inspector recognized - """ - return {} diff --git a/ceilometer/hardware/inspector/snmp.py b/ceilometer/hardware/inspector/snmp.py deleted file mode 100644 index 88220658..00000000 --- a/ceilometer/hardware/inspector/snmp.py +++ /dev/null @@ -1,313 +0,0 @@ -# -# Copyright 2014 ZHAW SoE -# Copyright 2014 Intel Corp -# -# Authors: Lucas Graf -# Toni Zehnder -# Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Inspector for collecting data over SNMP""" - -import copy -from pysnmp.entity.rfc3413.oneliner import cmdgen - -import six - -from ceilometer.hardware.inspector import base - - -class SNMPException(Exception): - pass - - -def parse_snmp_return(ret, is_bulk=False): - """Check the return value of snmp operations. - - :param ret: a tuple of (errorIndication, errorStatus, errorIndex, data) - returned by pysnmp - :param is_bulk: True if the ret value is from GetBulkRequest - :return: a tuple of (err, data) - err: True if error found, or False if no error found - data: a string of error description if error found, or the - actual return data of the snmp operation - """ - err = True - (errIndication, errStatus, errIdx, varBinds) = ret - if errIndication: - data = errIndication - elif errStatus: - if is_bulk: - varBinds = varBinds[-1] - data = "%s at %s" % (errStatus.prettyPrint(), - errIdx and varBinds[int(errIdx) - 1] or "?") - else: - err = False - data = varBinds - return err, data - - -EXACT = 'type_exact' -PREFIX = 'type_prefix' - - -class SNMPInspector(base.Inspector): - # Default port - _port = 161 - - _CACHE_KEY_OID = "snmp_cached_oid" - - # NOTE: The following mapping has been moved to the yaml file identified - # by the config options hardware.meter_definitions_file. However, we still - # keep the description here for code reading purposes. - - """ - - The following mapping defines how to construct - (value, metadata, extra) returned by inspect_generic - MAPPING = { - 'identifier': { - 'matching_type': EXACT or PREFIX, - 'metric_oid': (oid, value_converter) - 'metadata': { - metadata_name1: (oid1, value_converter), - metadata_name2: (oid2, value_converter), - }, - 'post_op': special func to modify the return data, - }, - } - - For matching_type of EXACT, each item in the above mapping will - return exactly one (value, metadata, extra) tuple. The value would be - returned from SNMP request GetRequest for the oid of 'metric_oid', and the - metadata dict would be constructed based on the values returned by SNMP - GetRequest for the oids of 'metadata'. - - For matching_type of PREFIX, SNMP request GetBulkRequest - would be sent to get values for oids of 'metric_oid' and - 'metadata' of each item in the above mapping. And each item might - return multiple (value, metadata, extra) tuples, e.g. - Suppose we have the following mapping: - MAPPING = { - 'disk.size.total': { - 'matching_type': PREFIX, - 'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int) - 'metadata': { - 'device': ("1.3.6.1.4.1.2021.9.1.3", str), - 'path': ("1.3.6.1.4.1.2021.9.1.2", str), - }, - 'post_op': None, - }, - and the SNMP agent has the following oid/value(s): - { - '1.3.6.1.4.1.2021.9.1.6.1': 19222656, - '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2", - '1.3.6.1.4.1.2021.9.1.2.1': "/" - '1.3.6.1.4.1.2021.9.1.6.2': 808112, - '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs", - '1.3.6.1.4.1.2021.9.1.2.2': "/run", - } - So here we'll return 2 instances of (value, metadata, extra): - (19222656, {'device': "/dev/sda2", 'path': "/"}, None) - (808112, {'device': "tmpfs", 'path': "/run"}, None) - - The post_op is assumed to be implemented by the new metric developer. It - could be used to add additional special metadata (e.g. ip address), or - to add information into the extra dict, which is returned to the - pollster to help construct the final sample, e.g. - extra.update(project_id=xy, user_id=zw) - """ - - def __init__(self): - super(SNMPInspector, self).__init__() - self._cmdGen = cmdgen.CommandGenerator() - - def _query_oids(self, host, oids, cache, is_bulk): - # send GetRequest or GetBulkRequest to get oid values and - # populate the values into the cache - authData = self._get_auth_strategy(host) - transport = cmdgen.UdpTransportTarget((host.hostname, - host.port or self._port)) - oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) - - if is_bulk: - ret = self._cmdGen.bulkCmd(authData, - transport, - 0, 100, - *oids, - lookupValues=True) - else: - ret = self._cmdGen.getCmd(authData, - transport, - *oids, - lookupValues=True) - (error, data) = parse_snmp_return(ret, is_bulk) - if error: - raise SNMPException("An error occurred, oids %(oid)s, " - "host %(host)s, %(err)s" % - dict(oid=oids, - host=host.hostname, - err=data)) - # save result into cache - if is_bulk: - for var_bind_table_row in data: - for name, val in var_bind_table_row: - oid_cache[str(name)] = val - else: - for name, val in data: - oid_cache[str(name)] = val - - @staticmethod - def find_matching_oids(oid_cache, oid, match_type, find_one=True): - matched = [] - if match_type == PREFIX: - for key in oid_cache.keys(): - if key.startswith(oid): - matched.append(key) - if find_one: - break - else: - if oid in oid_cache: - matched.append(oid) - return matched - - @staticmethod - def get_oid_value(oid_cache, oid_def, suffix=''): - oid, converter = oid_def - value = oid_cache[oid + suffix] - if converter: - value = converter(value) - return value - - @classmethod - def construct_metadata(cls, oid_cache, meta_defs, suffix=''): - metadata = {} - for key, oid_def in six.iteritems(meta_defs): - metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix) - return metadata - - @classmethod - def _find_missing_oids(cls, meter_def, cache): - # find oids that have not been queried and cached - new_oids = [] - oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {}) - # check metric_oid - if not cls.find_matching_oids(oid_cache, - meter_def['metric_oid'][0],
- meter_def['matching_type']): - new_oids.append(meter_def['metric_oid'][0]) - for metadata in meter_def['metadata'].values(): - if not cls.find_matching_oids(oid_cache, - metadata[0], - meter_def['matching_type']): - new_oids.append(metadata[0]) - return new_oids - - def inspect_generic(self, host, cache, extra_metadata, param): - # the snmp definition for the corresponding meter - meter_def = param - # collect oids that need to be queried - oids_to_query = self._find_missing_oids(meter_def, cache) - # query oids and populate them into the cache - if oids_to_query: - self._query_oids(host, oids_to_query, cache, - meter_def['matching_type'] == PREFIX) - # construct (value, metadata, extra) - oid_cache = cache[self._CACHE_KEY_OID] - # find all oids which are needed to construct final sample values - # for matching type of EXACT, only 1 sample would be generated - # for matching type of PREFIX, multiple samples could be generated - oids_for_sample_values = self.find_matching_oids( - oid_cache, - meter_def['metric_oid'][0], - meter_def['matching_type'], - False) - input_extra_metadata = extra_metadata - - for oid in oids_for_sample_values: - suffix = oid[len(meter_def['metric_oid'][0]):] - value = self.get_oid_value(oid_cache, - meter_def['metric_oid'], - suffix) - # get the metadata for this sample value - metadata = self.construct_metadata(oid_cache, - meter_def['metadata'], - suffix) - extra_metadata = copy.deepcopy(input_extra_metadata) or {} - # call post_op for special cases - if meter_def['post_op']: - func = getattr(self, meter_def['post_op'], None) - if func: - value = func(host, cache, meter_def, - value, metadata, extra_metadata, - suffix) - yield (value, metadata, extra_metadata) - - def _post_op_memory_avail_to_used(self, host, cache, meter_def, - value, metadata, extra, suffix): - _memory_total_oid = "1.3.6.1.4.1.2021.4.5.0" - if _memory_total_oid not in cache[self._CACHE_KEY_OID]: - self._query_oids(host, [_memory_total_oid], cache, False) - value = int(cache[self._CACHE_KEY_OID][_memory_total_oid]) - value - return value - - def _post_op_net(self, host, cache, meter_def, - value, metadata, extra, suffix): - # add ip address into metadata - _interface_ip_oid = "1.3.6.1.2.1.4.20.1.2" - oid_cache = cache.setdefault(self._CACHE_KEY_OID, {}) - if not self.find_matching_oids(oid_cache, - _interface_ip_oid, - PREFIX): - # populate the oid into cache - self._query_oids(host, [_interface_ip_oid], cache, True) - ip_addr = '' - for k, v in six.iteritems(oid_cache): - if k.startswith(_interface_ip_oid) and v == int(suffix[1:]): - ip_addr = k.replace(_interface_ip_oid + ".", "") - metadata.update(ip=ip_addr) - # update resource_id for each nic interface - self._suffix_resource_id(host, metadata, 'name', extra) - return value - - def _post_op_disk(self, host, cache, meter_def, - value, metadata, extra, suffix): - self._suffix_resource_id(host, metadata, 'device', extra) - return value - - @staticmethod - def _suffix_resource_id(host, metadata, key, extra): - prefix = metadata.get(key) - if prefix: - res_id = extra.get('resource_id') or host.hostname - res_id = res_id + ".%s" % metadata.get(key) - extra.update(resource_id=res_id) - - @staticmethod - def _get_auth_strategy(host): - if host.password: - auth_strategy = cmdgen.UsmUserData(host.username, - authKey=host.password) - else: - auth_strategy = cmdgen.CommunityData(host.username or 'public') - return auth_strategy - - def prepare_params(self, param): - processed = {} - processed['matching_type'] = param['matching_type'] - processed['metric_oid'] =
(param['oid'], eval(param['type'])) - processed['post_op'] = param.get('post_op', None) - processed['metadata'] = {} - for k, v in six.iteritems(param.get('metadata', {})): - processed['metadata'][k] = (v['oid'], eval(v['type'])) - return processed diff --git a/ceilometer/hardware/pollsters/__init__.py b/ceilometer/hardware/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/hardware/pollsters/data/snmp.yaml b/ceilometer/hardware/pollsters/data/snmp.yaml deleted file mode 100644 index 60f84af1..00000000 --- a/ceilometer/hardware/pollsters/data/snmp.yaml +++ /dev/null @@ -1,189 +0,0 @@ ---- - -metric: -# cpu - - name: hardware.cpu.load.1min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.1" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.load.5min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.2" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.load.15min - unit: process - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.10.1.3.3" - type: "lambda x: float(str(x))" - - - name: hardware.cpu.util - unit: "%" - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.9.0" - type: "int" -# disk - - name: hardware.disk.size.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.4.1.2021.9.1.6" - type: "int" - metadata: &disk_metadata - path: - oid: "1.3.6.1.4.1.2021.9.1.2" - type: "str" - device: - oid: "1.3.6.1.4.1.2021.9.1.3" - type: "str" - post_op: "_post_op_disk" - - - name: hardware.disk.size.used - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.4.1.2021.9.1.8" - type: "int" - metadata: *disk_metadata - post_op: "_post_op_disk" -# memory - - name: hardware.memory.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.5.0" - type: "int" - - - name: hardware.memory.used - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.6.0" - type: "int" - post_op: "_post_op_memory_avail_to_used" - - - name: hardware.memory.swap.total - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.3.0" - type: "int" - - - name: hardware.memory.swap.avail - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.4.0" - type: "int" - - - name: hardware.memory.buffer - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.14.0" - type: "int" - - - name: hardware.memory.cached - unit: KB - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.4.15.0" - type: "int" -# network interface - - name: hardware.network.incoming.bytes - unit: B - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.10" - type: "int" - metadata: &net_metadata - name: - oid: "1.3.6.1.2.1.2.2.1.2" - type: "str" - speed: - oid: "1.3.6.1.2.1.2.2.1.5" - type: "lambda x: int(x) / 8" - mac: - oid: "1.3.6.1.2.1.2.2.1.6" - type: "lambda x: x.prettyPrint().replace('0x', '')" - post_op: "_post_op_net" - - - name: hardware.network.outgoing.bytes - unit: B - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.16" - type: "int" - metadata: *net_metadata - post_op: "_post_op_net" - - - name: 
hardware.network.outgoing.errors - unit: packet - type: cumulative - snmp_inspector: - matching_type: "type_prefix" - oid: "1.3.6.1.2.1.2.2.1.20" - type: "int" - metadata: *net_metadata - post_op: "_post_op_net" -#network aggregate - - name: hardware.network.ip.outgoing.datagrams - unit: datagrams - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.2.1.4.10.0" - type: "int" - - - name: hardware.network.ip.incoming.datagrams - unit: datagrams - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.2.1.4.3.0" - type: "int" -#system stats - - name: hardware.system_stats.cpu.idle - unit: "%" - type: gauge - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.11.0" - type: "int" - - - name: hardware.system_stats.io.outgoing.blocks - unit: blocks - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.57.0" - type: "int" - - - name: hardware.system_stats.io.incoming.blocks - unit: blocks - type: cumulative - snmp_inspector: - matching_type: "type_exact" - oid: "1.3.6.1.4.1.2021.11.58.0" - type: "int" diff --git a/ceilometer/hardware/pollsters/generic.py b/ceilometer/hardware/pollsters/generic.py deleted file mode 100644 index 86245204..00000000 --- a/ceilometer/hardware/pollsters/generic.py +++ /dev/null @@ -1,218 +0,0 @@ -# -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import pkg_resources - -from oslo_config import cfg -from oslo_log import log -from oslo_utils import netutils -import six - -from ceilometer.agent import plugin_base -from ceilometer import declarative -from ceilometer.hardware import inspector as insloader -from ceilometer.hardware.pollsters import util -from ceilometer.i18n import _LE, _LW -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('meter_definitions_file', - default="snmp.yaml", - help="Configuration file for defining hardware snmp meters." 
- ), -] - -cfg.CONF.register_opts(OPTS, group='hardware') - -LOG = log.getLogger(__name__) - - -class MeterDefinition(object): - required_fields = ['name', 'unit', 'type'] - - def __init__(self, definition_cfg): - self.cfg = definition_cfg - for fname, fval in self.cfg.items(): - if (isinstance(fname, six.string_types) and - (fname in self.required_fields or - fname.endswith('_inspector'))): - setattr(self, fname, fval) - else: - LOG.warning(_LW("Ignoring unrecognized field %s"), fname) - for fname in self.required_fields: - if not getattr(self, fname, None): - raise declarative.MeterDefinitionException( - _LE("Missing field %s") % fname, self.cfg) - if self.type not in sample.TYPES: - raise declarative.MeterDefinitionException( - _LE("Unrecognized type value %s") % self.type, self.cfg) - - -class GenericHardwareDeclarativePollster(plugin_base.PollsterBase): - CACHE_KEY = 'hardware.generic' - mapping = None - - def __init__(self): - super(GenericHardwareDeclarativePollster, self).__init__() - self.inspectors = {} - - def _update_meter_definition(self, definition): - self.meter_definition = definition - self.cached_inspector_params = {} - - @property - def default_discovery(self): - return 'tripleo_overcloud_nodes' - - @staticmethod - def _parse_resource(res): - """Parse resource from discovery. - - Either a URL or a dict can be given. A dict has to contain at least - the keys 'resource_id' and 'resource_url'; all the dict keys will be - stored as metadata. - - :param res: URL or dict containing all resource info. - :return parsed_url, resource_id, metadata: Returns parsed URL used for - SNMP query, unique identifier of the resource and metadata - of the resource. - """ - parsed_url, resource_id, metadata = (None, None, None) - if isinstance(res, dict): - if 'resource_url' not in res or 'resource_id' not in res: - LOG.error(_LE('Passed resource dict must contain keys ' - 'resource_id and resource_url.')) - else: - metadata = res - parsed_url = netutils.urlsplit(res['resource_url']) - resource_id = res['resource_id'] - else: - metadata = {} - parsed_url = netutils.urlsplit(res) - resource_id = res - - return parsed_url, resource_id, metadata - - def _get_inspector(self, parsed_url): - if parsed_url.scheme not in self.inspectors: - try: - driver = insloader.get_inspector(parsed_url) - self.inspectors[parsed_url.scheme] = driver - except Exception as err: - LOG.exception(_LE("Cannot load inspector %(name)s: %(err)s"), - dict(name=parsed_url.scheme, - err=err)) - raise err - return self.inspectors[parsed_url.scheme] - - def get_samples(self, manager, cache, resources=None): - """Return an iterable of Sample instances from polling the resources. - - :param manager: The service manager invoking the plugin - :param cache: A dictionary for passing data between plugins - :param resources: end point to poll data from - """ - resources = resources or [] - h_cache = cache.setdefault(self.CACHE_KEY, {}) - sample_iters = [] - - # Get the meter identifiers to poll - identifier = self.meter_definition.name - - for resource in resources: - parsed_url, res, extra_metadata = self._parse_resource(resource) - if parsed_url is None: - LOG.error(_LE("Skip invalid resource %s"), resource) - continue - ins = self._get_inspector(parsed_url) - try: - # Call hardware inspector to poll for the data - i_cache = h_cache.setdefault(res, {}) - - # Prepare inspector parameters and cache them for performance - param_key = parsed_url.scheme + '.'
+ identifier - inspector_param = self.cached_inspector_params.get(param_key) - if not inspector_param: - param = getattr(self.meter_definition, - parsed_url.scheme + '_inspector', {}) - inspector_param = ins.prepare_params(param) - self.cached_inspector_params[param_key] = inspector_param - - if identifier not in i_cache: - i_cache[identifier] = list(ins.inspect_generic( - host=parsed_url, - cache=i_cache, - extra_metadata=extra_metadata, - param=inspector_param)) - # Generate samples - if i_cache[identifier]: - sample_iters.append(self.generate_samples( - parsed_url, - i_cache[identifier])) - except Exception as err: - LOG.exception(_LE('inspector call failed for %(ident)s ' - 'host %(host)s: %(err)s'), - dict(ident=identifier, - host=parsed_url.hostname, - err=err)) - return itertools.chain(*sample_iters) - - def generate_samples(self, host_url, data): - """Generate a list of Sample from the data returned by inspector - - :param host_url: host url of the endpoint - :param data: list of data returned by the corresponding inspector - """ - samples = [] - definition = self.meter_definition - for (value, metadata, extra) in data: - s = util.make_sample_from_host(host_url, - name=definition.name, - sample_type=definition.type, - unit=definition.unit, - volume=value, - res_metadata=metadata, - extra=extra, - name_prefix=None) - samples.append(s) - return samples - - @classmethod - def build_pollsters(cls): - if not cls.mapping: - definition_cfg = declarative.load_definitions( - {}, cfg.CONF.hardware.meter_definitions_file, - pkg_resources.resource_filename(__name__, "data/snmp.yaml")) - cls.mapping = load_definition(definition_cfg) - - pollsters = [] - for name in cls.mapping: - pollster = cls() - pollster._update_meter_definition(cls.mapping[name]) - pollsters.append((name, pollster)) - return pollsters - - -def load_definition(config_def): - mappings = {} - for meter_def in config_def.get('metric', []): - try: - meter = MeterDefinition(meter_def) - mappings[meter.name] = meter - except declarative.DefinitionException as e: - errmsg = _LE("Error loading meter definition: %s") - LOG.error(errmsg, e.brief_message) - return mappings diff --git a/ceilometer/hardware/pollsters/util.py b/ceilometer/hardware/pollsters/util.py deleted file mode 100644 index 5a68c658..00000000 --- a/ceilometer/hardware/pollsters/util.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright 2013 ZHAW SoE -# Copyright 2014 Intel Corp. -# -# Authors: Lucas Graf -# Toni Zehnder -# Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
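As a rough, self-contained illustration of the declarative plumbing above: a metric entry from snmp.yaml supplies the snmp_inspector parameters, which SNMPInspector.prepare_params converts by eval()ing the 'type' strings into callables. A sketch using values adapted from the hardware.cpu.load.1min entry shown earlier:

# Adapted from the hardware.cpu.load.1min entry in snmp.yaml above.
param = {
    'matching_type': 'type_exact',
    'oid': '1.3.6.1.4.1.2021.10.1.3.1',
    'type': 'lambda x: float(str(x))',
}

# Mirrors the shape produced by the deleted prepare_params: eval() turns
# the YAML 'type' strings into converters; acceptable only for trusted
# configuration files, unsafe for untrusted input.
processed = {
    'matching_type': param['matching_type'],
    'metric_oid': (param['oid'], eval(param['type'])),
    'post_op': param.get('post_op'),
    'metadata': {k: (v['oid'], eval(v['type']))
                 for k, v in param.get('metadata', {}).items()},
}
print(processed['metric_oid'][1]('0.42'))  # -> 0.42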
- -import copy - -from six.moves.urllib import parse as urlparse - -from ceilometer import sample - - -def get_metadata_from_host(host_url): - return {'resource_url': urlparse.urlunsplit(host_url)} - - -def make_resource_metadata(res_metadata=None, host_url=None): - resource_metadata = dict() - if res_metadata is not None: - metadata = copy.copy(res_metadata) - resource_metadata.update(metadata) - resource_metadata.update(get_metadata_from_host(host_url)) - return resource_metadata - - -def make_sample_from_host(host_url, name, sample_type, unit, volume, - project_id=None, user_id=None, resource_id=None, - res_metadata=None, extra=None, - name_prefix='hardware'): - - extra = extra or {} - resource_metadata = make_resource_metadata(res_metadata, host_url) - resource_metadata.update(extra) - - res_id = resource_id or extra.get('resource_id') or host_url.hostname - if name_prefix: - name = name_prefix + '.' + name - return sample.Sample( - name=name, - type=sample_type, - unit=unit, - volume=volume, - user_id=user_id or extra.get('user_id'), - project_id=project_id or extra.get('project_id'), - resource_id=res_id, - resource_metadata=resource_metadata, - source='hardware', - ) diff --git a/ceilometer/image/__init__.py b/ceilometer/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/image/glance.py b/ceilometer/image/glance.py deleted file mode 100644 index e25aaedd..00000000 --- a/ceilometer/image/glance.py +++ /dev/null @@ -1,129 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common code for working with images -""" - -from __future__ import absolute_import - -import glanceclient -from oslo_config import cfg - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - - -OPTS = [ - cfg.IntOpt('glance_page_size', - default=0, - help="Number of items to request in " - "each paginated Glance API request " - "(parameter used by glanceclient). 
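A quick usage sketch for make_sample_from_host above, standalone: the stdlib urlsplit stands in for netutils.urlsplit, the URL is an example value, and only the naming and resource-id fallback logic is reproduced.

from urllib.parse import urlsplit

host_url = urlsplit('snmp://admin@node-1.example.org:161')
extra = {}  # extra metadata returned by the inspector; may carry ids

# resource_id falls back from the explicit argument, to the inspector's
# extra metadata, to the hostname of the polled endpoint:
res_id = None or extra.get('resource_id') or host_url.hostname

# the default name_prefix 'hardware' is prepended to the meter name:
name = 'hardware' + '.' + 'cpu.load.1min'

print(name, res_id)  # hardware.cpu.load.1min node-1.example.org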
" - "If this is less than or equal to 0, " - "page size is not specified " - "(default value in glanceclient is used)."), -] - -SERVICE_OPTS = [ - cfg.StrOpt('glance', - default='image', - help='Glance service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') - - -class _Base(plugin_base.PollsterBase): - - @property - def default_discovery(self): - return 'endpoint:%s' % cfg.CONF.service_types.glance - - @staticmethod - def get_glance_client(ksclient, endpoint): - # hard-code v1 glance API version selection while v2 API matures - return glanceclient.Client('1', - session=keystone_client.get_session(), - endpoint=endpoint, - auth=ksclient.session.auth) - - def _get_images(self, ksclient, endpoint): - client = self.get_glance_client(ksclient, endpoint) - page_size = cfg.CONF.glance_page_size - kwargs = {} - if page_size > 0: - kwargs['page_size'] = page_size - return client.images.list(filters={"is_public": None}, **kwargs) - - def _iter_images(self, ksclient, cache, endpoint): - """Iterate over all images.""" - key = '%s-images' % endpoint - if key not in cache: - cache[key] = list(self._get_images(ksclient, endpoint)) - return iter(cache[key]) - - @staticmethod - def extract_image_metadata(image): - return dict((k, getattr(image, k)) - for k in - [ - "status", - "is_public", - "name", - "deleted", - "container_format", - "created_at", - "disk_format", - "updated_at", - "properties", - "min_disk", - "protected", - "checksum", - "deleted_at", - "min_ram", - "size", ]) - - -class ImagePollster(_Base): - def get_samples(self, manager, cache, resources): - for endpoint in resources: - for image in self._iter_images(manager.keystone, cache, endpoint): - yield sample.Sample( - name='image', - type=sample.TYPE_GAUGE, - unit='image', - volume=1, - user_id=None, - project_id=image.owner, - resource_id=image.id, - resource_metadata=self.extract_image_metadata(image), - ) - - -class ImageSizePollster(_Base): - def get_samples(self, manager, cache, resources): - for endpoint in resources: - for image in self._iter_images(manager.keystone, cache, endpoint): - yield sample.Sample( - name='image.size', - type=sample.TYPE_GAUGE, - unit='B', - volume=image.size, - user_id=None, - project_id=image.owner, - resource_id=image.id, - resource_metadata=self.extract_image_metadata(image), - ) diff --git a/ceilometer/ipmi/__init__.py b/ceilometer/ipmi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/notifications/__init__.py b/ceilometer/ipmi/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/notifications/ironic.py b/ceilometer/ipmi/notifications/ironic.py deleted file mode 100644 index 51a00fef..00000000 --- a/ceilometer/ipmi/notifications/ironic.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# Copyright 2014 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Converters for producing hardware sensor data sample messages from -notification events. 
-""" - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging as messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('ironic_exchange', - default='ironic', - help='Exchange name for Ironic notifications.'), -] - - -cfg.CONF.register_opts(OPTS) - - -# Map unit name to SI -UNIT_MAP = { - 'Watts': 'W', - 'Volts': 'V', -} - - -def validate_reading(data): - """Some sensors read "Disabled".""" - return data != 'Disabled' - - -def transform_id(data): - return data.lower().replace(' ', '_') - - -def parse_reading(data): - try: - volume, unit = data.split(' ', 1) - unit = unit.rsplit(' ', 1)[-1] - return float(volume), UNIT_MAP.get(unit, unit) - except ValueError: - raise InvalidSensorData('unable to parse sensor reading: %s' % - data) - - -class InvalidSensorData(ValueError): - pass - - -class SensorNotification(plugin_base.NotificationBase): - """A generic class for extracting samples from sensor data notifications. - - A notification message can contain multiple samples from multiple - sensors, all with the same basic structure: the volume for the sample - is found as part of the value of a 'Sensor Reading' key. The unit - is in the same value. - - Subclasses exist solely to allow flexibility with stevedore configuration. - """ - - event_types = ['hardware.ipmi.*'] - metric = None - - def get_targets(self, conf): - """oslo.messaging.TargetS for this plugin.""" - return [messaging.Target(topic=topic, - exchange=conf.ironic_exchange) - for topic in self.get_notification_topics(conf)] - - def _get_sample(self, message): - try: - return (payload for _, payload - in message['payload'][self.metric].items()) - except KeyError: - return [] - - @staticmethod - def _package_payload(message, payload): - # NOTE(chdent): How much of the payload should we keep? - payload['node'] = message['payload']['node_uuid'] - info = {'publisher_id': message['publisher_id'], - 'timestamp': message['payload']['timestamp'], - 'event_type': message['payload']['event_type'], - 'user_id': message['payload'].get('user_id'), - 'project_id': message['payload'].get('project_id'), - 'payload': payload} - return info - - def process_notification(self, message): - """Read and process a notification. - - The guts of a message are in dict value of a 'payload' key - which then itself has a payload key containing a dict of - multiple sensor readings. - - If expected keys in the payload are missing or values - are not in the expected form for transformations, - KeyError and ValueError are caught and the current - sensor payload is skipped. - """ - payloads = self._get_sample(message['payload']) - for payload in payloads: - try: - # Provide a fallback resource_id in case parts are missing. 
- resource_id = 'missing id' - try: - resource_id = '%(nodeid)s-%(sensorid)s' % { - 'nodeid': message['payload']['node_uuid'], - 'sensorid': transform_id(payload['Sensor ID']) - } - except KeyError as exc: - raise InvalidSensorData('missing key in payload: %s' % exc) - - info = self._package_payload(message, payload) - - try: - sensor_reading = info['payload']['Sensor Reading'] - except KeyError as exc: - raise InvalidSensorData( - "missing 'Sensor Reading' in payload" - ) - - if validate_reading(sensor_reading): - volume, unit = parse_reading(sensor_reading) - yield sample.Sample.from_notification( - name='hardware.ipmi.%s' % self.metric.lower(), - type=sample.TYPE_GAUGE, - unit=unit, - volume=volume, - resource_id=resource_id, - message=info, - user_id=info['user_id'], - project_id=info['project_id']) - - except InvalidSensorData as exc: - LOG.warning( - 'invalid sensor data for %(resource)s: %(error)s' % - dict(resource=resource_id, error=exc) - ) - continue - - -class TemperatureSensorNotification(SensorNotification): - metric = 'Temperature' - - -class CurrentSensorNotification(SensorNotification): - metric = 'Current' - - -class FanSensorNotification(SensorNotification): - metric = 'Fan' - - -class VoltageSensorNotification(SensorNotification): - metric = 'Voltage' diff --git a/ceilometer/ipmi/platform/__init__.py b/ceilometer/ipmi/platform/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/ipmi/platform/exception.py b/ceilometer/ipmi/platform/exception.py deleted file mode 100644 index bc8c13dc..00000000 --- a/ceilometer/ipmi/platform/exception.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2014 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -class NodeManagerException(Exception): - pass - - -class IPMIException(Exception): - pass diff --git a/ceilometer/ipmi/platform/intel_node_manager.py b/ceilometer/ipmi/platform/intel_node_manager.py deleted file mode 100644 index 21da987a..00000000 --- a/ceilometer/ipmi/platform/intel_node_manager.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2014 Intel Corporation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Node manager engine to collect power and temperature of compute node. - -Intel Node Manager Technology enables the datacenter IT to monitor and control -actual server power, thermal and compute utilization behavior through industry -defined standard IPMI. 
This file provides Node Manager engine to get simple -system power and temperature data based on ipmitool. -""" - -import binascii -import collections -import tempfile -import time - -from oslo_config import cfg -import six - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import ipmitool - - -OPTS = [ - cfg.IntOpt('node_manager_init_retry', - default=3, - help='Number of retries upon Intel Node ' - 'Manager initialization failure') -] - - -CONF = cfg.CONF -CONF.register_opts(OPTS, group='ipmi') - -IPMICMD = {"sdr_dump": "sdr dump", - "sdr_info": "sdr info", - "sensor_dump": "sdr -v"} -IPMIRAWCMD = {"get_device_id": "raw 0x06 0x01", - "get_nm_version": "raw 0x2e 0xca 0x57 0x01 0x00", - "init_sensor_agent": "raw 0x0a 0x2c 0x01", - "init_complete": "raw 0x0a 0x2c 0x00", - "init_sensor_agent_status": "raw 0x0a 0x2c 0x00", - "read_power_all": "raw 0x2e 0xc8 0x57 0x01 0x00 0x01 0x00 0x00", - "read_inlet_temperature": - "raw 0x2e 0xc8 0x57 0x01 0x00 0x02 0x00 0x00", - "read_outlet_temperature": - "raw 0x2e 0xc8 0x57 0x01 0x00 0x05 0x00 0x00", - "read_airflow": "raw 0x2e 0xc8 0x57 0x01 0x00 0x04 0x00 0x00", - "read_cups_utilization": "raw 0x2e 0x65 0x57 0x01 0x00 0x05", - "read_cups_index": "raw 0x2e 0x65 0x57 0x01 0x00 0x01"} - -MANUFACTURER_ID_INTEL = ['57', '01', '00'] -INTEL_PREFIX = '5701000d01' - -# The template dict are made according to the spec. It contains the expected -# length of each item. And it can be used to parse the output of IPMI command. - -ONE_RETURN_TEMPLATE = {"ret": 1} - -BMC_INFO_TEMPLATE = collections.OrderedDict() -BMC_INFO_TEMPLATE['Device_ID'] = 1 -BMC_INFO_TEMPLATE['Device_Revision'] = 1 -BMC_INFO_TEMPLATE['Firmware_Revision_1'] = 1 -BMC_INFO_TEMPLATE['Firmware_Revision_2'] = 1 -BMC_INFO_TEMPLATE['IPMI_Version'] = 1 -BMC_INFO_TEMPLATE['Additional_Device_support'] = 1 -BMC_INFO_TEMPLATE['Manufacturer_ID'] = 3 -BMC_INFO_TEMPLATE['Product_ID'] = 2 -BMC_INFO_TEMPLATE['Auxiliary_Firmware_Revision'] = 4 - -NM_STATISTICS_TEMPLATE = collections.OrderedDict() -NM_STATISTICS_TEMPLATE['Manufacturer_ID'] = 3 -NM_STATISTICS_TEMPLATE['Current_value'] = 2 -NM_STATISTICS_TEMPLATE['Minimum_value'] = 2 -NM_STATISTICS_TEMPLATE['Maximum_value'] = 2 -NM_STATISTICS_TEMPLATE['Average_value'] = 2 -NM_STATISTICS_TEMPLATE['Time_stamp'] = 4 -NM_STATISTICS_TEMPLATE['Report_period'] = 4 -NM_STATISTICS_TEMPLATE["DomainID_PolicyState"] = 1 - -NM_GET_DEVICE_ID_TEMPLATE = collections.OrderedDict() -NM_GET_DEVICE_ID_TEMPLATE['Device_ID'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Device_revision'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_revision_1'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_Revision_2'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['IPMI_Version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Additinal_Device_support'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Manufacturer_ID'] = 3 -NM_GET_DEVICE_ID_TEMPLATE['Product_ID_min_version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Product_ID_major_version'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Implemented_firmware'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Firmware_build_number'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Last_digit_firmware_build_number'] = 1 -NM_GET_DEVICE_ID_TEMPLATE['Image_flags'] = 1 - -NM_GET_VERSION_TEMPLATE = collections.OrderedDict() -NM_GET_VERSION_TEMPLATE['Manufacturer_ID'] = 3 -NM_GET_VERSION_TEMPLATE['NM_Version'] = 1 -NM_GET_VERSION_TEMPLATE['IPMI_Version'] = 1 -NM_GET_VERSION_TEMPLATE['Patch_Version'] = 1 -NM_GET_VERSION_TEMPLATE['Firmware_Revision_Major'] = 1 -NM_GET_VERSION_TEMPLATE['Firmware_Revision_Minor'] = 1 - 
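To make the template dicts above concrete, here is how a raw ipmitool response is consumed by them (the parsing helpers _hex and _parse_output appear further down in this patch; the response bytes below are made up, except that 57 01 00 is Intel's real manufacturer id, 343):

import collections

template = collections.OrderedDict(
    [('Manufacturer_ID', 3), ('NM_Version', 1), ('IPMI_Version', 1),
     ('Patch_Version', 1), ('Firmware_Revision_Major', 1),
     ('Firmware_Revision_Minor', 1)])

output = '57 01 00 05 03 07 02 09'.split(' ')

parsed, index = {}, 0
for name, length in template.items():  # each field consumes `length` bytes
    parsed[name] = output[index:index + length]
    index += length

def to_int(byte_list):
    # multi-byte fields are little-endian, hence the reverse (like _hex)
    return int(''.join(reversed(byte_list)), 16)

print(to_int(parsed['Manufacturer_ID']))  # 343
print(to_int(parsed['NM_Version']))       # 5, i.e. Intel NM 3.0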
-NM_CUPS_UTILIZATION_TEMPLATE = collections.OrderedDict() -NM_CUPS_UTILIZATION_TEMPLATE['Manufacturer_ID'] = 3 -NM_CUPS_UTILIZATION_TEMPLATE['CPU_Utilization'] = 8 -NM_CUPS_UTILIZATION_TEMPLATE['Mem_Utilization'] = 8 -NM_CUPS_UTILIZATION_TEMPLATE['IO_Utilization'] = 8 - -NM_CUPS_INDEX_TEMPLATE = collections.OrderedDict() -NM_CUPS_INDEX_TEMPLATE['Manufacturer_ID'] = 3 -NM_CUPS_INDEX_TEMPLATE['CUPS_Index'] = 2 - - -def _hex(list=None): - """Format the return value in list into hex.""" - - list = list or [] - if list: - list.reverse() - return int(''.join(list), 16) - - return 0 - - -class NodeManager(object): - """The python implementation of Intel Node Manager engine using ipmitool - - The class implements the engine to read power and temperature of - compute node. It uses ipmitool to execute the IPMI command and parse - the output into dict. - """ - _inited = False - _instance = None - - def __new__(cls, *args, **kwargs): - """Singleton to avoid duplicated initialization.""" - if not cls._instance: - cls._instance = super(NodeManager, cls).__new__(cls, *args, - **kwargs) - return cls._instance - - def __init__(self): - if not (self._instance and self._inited): - # As singleton, only the 1st NM pollster would trigger its - # initialization. nm_version indicate init result, and is shared - # across all pollsters - self._inited = True - self.nm_version = 0 - self.channel_slave = '' - - self.nm_version = self.check_node_manager() - - @staticmethod - def _parse_slave_and_channel(file_path): - """Parse the dumped file to get slave address and channel number. - - :param file_path: file path of dumped SDR file. - :return: slave address and channel number of target device or None if - not found. - """ - prefix = INTEL_PREFIX - # According to Intel Node Manager spec, section 4.5, for Intel NM - # discovery OEM SDR records are type C0h. It contains manufacture ID - # and OEM data in the record body. - # 0-2 bytes are OEM ID, byte 3 is 0Dh and byte 4 is 01h. Byte 5, 6 - # is Intel NM device slave address and channel number/sensor owner LUN. - with open(file_path, 'rb') as bin_fp: - data_str = binascii.hexlify(bin_fp.read()) - - if six.PY3: - data_str = data_str.decode('ascii') - oem_id_index = data_str.find(prefix) - if oem_id_index != -1: - ret = data_str[oem_id_index + len(prefix): - oem_id_index + len(prefix) + 4] - # Byte 5 is slave address. [7:4] from byte 6 is channel - # number, so just pick ret[2] here. - return (ret[0:2], ret[2]) - - @ipmitool.execute_ipmi_cmd(BMC_INFO_TEMPLATE) - def get_device_id(self): - """IPMI command GET_DEVICE_ID.""" - return IPMIRAWCMD["get_device_id"] - - @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) - def _init_sensor_agent(self): - """Run initialization agent.""" - return IPMIRAWCMD["init_sensor_agent"] - - @ipmitool.execute_ipmi_cmd(ONE_RETURN_TEMPLATE) - def _init_sensor_agent_process(self): - """Check the status of initialization agent.""" - return IPMIRAWCMD["init_sensor_agent_status"] - - @ipmitool.execute_ipmi_cmd() - def _dump_sdr_file(self, data_file=""): - """Dump SDR into a file.""" - return IPMICMD["sdr_dump"] + " " + data_file - - @ipmitool.execute_ipmi_cmd(NM_GET_DEVICE_ID_TEMPLATE) - def _node_manager_get_device_id(self): - """GET_DEVICE_ID command in Intel Node Manager - - Different from IPMI command GET_DEVICE_ID, it contains more information - of Intel Node Manager. 
- """ - return self.channel_slave + ' ' + IPMIRAWCMD["get_device_id"] - - @ipmitool.execute_ipmi_cmd(NM_GET_VERSION_TEMPLATE) - def _node_manager_get_version(self): - """GET_NODE_MANAGER_VERSION command in Intel Node Manager - - Byte 4 of the response: - 01h - Intel NM 1.0 - 02h - Intel NM 1.5 - 03h - Intel NM 2.0 - 04h - Intel NM 2.5 - 05h - Intel NM 3.0 - """ - return self.channel_slave + ' ' + IPMIRAWCMD["get_nm_version"] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_power_all(self): - """Get the power consumption of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_power_all'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_inlet_temperature(self): - """Get the inlet temperature info of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_inlet_temperature'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_outlet_temperature(self): - """Get the outlet temperature info of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_outlet_temperature'] - - @ipmitool.execute_ipmi_cmd(NM_STATISTICS_TEMPLATE) - def _read_airflow(self): - """Get the volumetric airflow of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_airflow'] - - @ipmitool.execute_ipmi_cmd(NM_CUPS_UTILIZATION_TEMPLATE) - def _read_cups_utilization(self): - """Get the average CUPS utilization of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_utilization'] - - @ipmitool.execute_ipmi_cmd(NM_CUPS_INDEX_TEMPLATE) - def _read_cups_index(self): - """Get the CUPS Index of the whole platform.""" - return self.channel_slave + ' ' + IPMIRAWCMD['read_cups_index'] - - def read_power_all(self): - return self._read_power_all() if self.nm_version > 0 else {} - - def read_inlet_temperature(self): - return self._read_inlet_temperature() if self.nm_version > 0 else {} - - def read_outlet_temperature(self): - return self._read_outlet_temperature() if self.nm_version >= 5 else {} - - def read_airflow(self): - # only available after NM 3.0 - return self._read_airflow() if self.nm_version >= 5 else {} - - def read_cups_utilization(self): - # only available after NM 3.0 - return self._read_cups_utilization() if self.nm_version >= 5 else {} - - def read_cups_index(self): - # only available after NM 3.0 - return self._read_cups_index() if self.nm_version >= 5 else {} - - def init_node_manager(self): - if self._init_sensor_agent_process()['ret'] == ['01']: - return - # Run sensor initialization agent - for i in range(CONF.ipmi.node_manager_init_retry): - self._init_sensor_agent() - time.sleep(1) - if self._init_sensor_agent_process()['ret'] == ['01']: - return - - raise nmexcept.NodeManagerException(_('Node Manager init failed')) - - def discover_slave_channel(self): - """Discover target slave address and channel number.""" - file_path = tempfile.mkstemp()[1] - self._dump_sdr_file(data_file=file_path) - ret = self._parse_slave_and_channel(file_path) - slave_address = ''.join(['0x', ret[0]]) - channel = ''.join(['0x', ret[1]]) - # String of channel and slave_address - self.channel_slave = '-b ' + channel + ' -t ' + slave_address - - def node_manager_version(self): - """Intel Node Manager capability checking - - This function is used to detect if compute node support Intel Node - Manager(return version number) or not(return -1) and parse out the - slave address and channel number of node manager. 
- """ - self.manufacturer_id = self.get_device_id()['Manufacturer_ID'] - if MANUFACTURER_ID_INTEL != self.manufacturer_id: - # If the manufacturer is not Intel, just set False and return. - return 0 - - self.discover_slave_channel() - support = self._node_manager_get_device_id()['Implemented_firmware'] - # According to Intel Node Manager spec, return value of GET_DEVICE_ID, - # bits 3 to 0 shows if Intel NM implemented or not. - if int(support[0], 16) & 0xf == 0: - return 0 - - return _hex(self._node_manager_get_version()['NM_Version']) - - def check_node_manager(self): - """Intel Node Manager init and check - - This function is used to initialize Intel Node Manager and check the - capability without throwing exception. It's safe to call it on - non-NodeManager platform. - """ - try: - self.init_node_manager() - nm_version = self.node_manager_version() - except (nmexcept.NodeManagerException, nmexcept.IPMIException): - return 0 - return nm_version diff --git a/ceilometer/ipmi/platform/ipmi_sensor.py b/ceilometer/ipmi/platform/ipmi_sensor.py deleted file mode 100644 index e6d32f19..00000000 --- a/ceilometer/ipmi/platform/ipmi_sensor.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2014 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""IPMI sensor to collect various sensor data of compute node""" - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer.ipmi.platform import ipmitool - -IPMICMD = {"sdr_dump": "sdr dump", - "sdr_info": "sdr info", - "sensor_dump": "sdr -v", - "sensor_dump_temperature": "sdr -v type Temperature", - "sensor_dump_current": "sdr -v type Current", - "sensor_dump_fan": "sdr -v type Fan", - "sensor_dump_voltage": "sdr -v type Voltage"} - -# Requires translation of output into dict -DICT_TRANSLATE_TEMPLATE = {"translate": 1} - - -class IPMISensor(object): - """The python implementation of IPMI sensor using ipmitool - - The class implements the IPMI sensor to get various sensor data of - compute node. It uses ipmitool to execute the IPMI command and parse - the output into dict. 
- """ - _inited = False - _instance = None - - def __new__(cls, *args, **kwargs): - """Singleton to avoid duplicated initialization.""" - if not cls._instance: - cls._instance = super(IPMISensor, cls).__new__(cls, *args, - **kwargs) - return cls._instance - - def __init__(self): - if not (self._instance and self._inited): - self.ipmi_support = False - self._inited = True - - self.ipmi_support = self.check_ipmi() - - @ipmitool.execute_ipmi_cmd() - def _get_sdr_info(self): - """Get the SDR info.""" - return IPMICMD['sdr_info'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_all(self): - """Get the sensor data for type.""" - return IPMICMD['sensor_dump'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_temperature(self): - """Get the sensor data for Temperature.""" - return IPMICMD['sensor_dump_temperature'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_voltage(self): - """Get the sensor data for Voltage.""" - return IPMICMD['sensor_dump_voltage'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_current(self): - """Get the sensor data for Current.""" - return IPMICMD['sensor_dump_current'] - - @ipmitool.execute_ipmi_cmd(DICT_TRANSLATE_TEMPLATE) - def _read_sensor_fan(self): - """Get the sensor data for Fan.""" - return IPMICMD['sensor_dump_fan'] - - def read_sensor_any(self, sensor_type=''): - """Get the sensor data for type.""" - if not self.ipmi_support: - return {} - - mapping = {'': self._read_sensor_all, - 'Temperature': self._read_sensor_temperature, - 'Fan': self._read_sensor_fan, - 'Voltage': self._read_sensor_voltage, - 'Current': self._read_sensor_current} - - try: - return mapping[sensor_type]() - except KeyError: - raise ipmiexcept.IPMIException(_('Wrong sensor type')) - - def check_ipmi(self): - """IPMI capability checking - - This function is used to detect if compute node is IPMI capable - platform. Just run a simple IPMI command to get SDR info for check. - """ - try: - self._get_sdr_info() - except ipmiexcept.IPMIException: - return False - return True diff --git a/ceilometer/ipmi/platform/ipmitool.py b/ceilometer/ipmi/platform/ipmitool.py deleted file mode 100644 index 7b049588..00000000 --- a/ceilometer/ipmi/platform/ipmitool.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utils to run ipmitool for data collection""" -from oslo_concurrency import processutils - -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer import utils - - -# Following 2 functions are copied from ironic project to handle ipmitool's -# sensor data output. Need code clean and sharing in future. 
-# Check ironic/drivers/modules/ipmitool.py
-
-
-def _get_sensor_type(sensor_data_dict):
-    # Only three sensor type name IDs exist: 'Sensor Type (Analog)',
-    # 'Sensor Type (Discrete)' and 'Sensor Type (Threshold)'
-
-    for key in ('Sensor Type (Analog)', 'Sensor Type (Discrete)',
-                'Sensor Type (Threshold)'):
-        try:
-            return sensor_data_dict[key].split(' ', 1)[0]
-        except KeyError:
-            continue
-
-    raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, "
-                                     "unknown sensor type"))
-
-
-def _process_sensor(sensor_data):
-    sensor_data_fields = sensor_data.split('\n')
-    sensor_data_dict = {}
-    for field in sensor_data_fields:
-        if not field:
-            continue
-        kv_value = field.split(':')
-        if len(kv_value) != 2:
-            continue
-        sensor_data_dict[kv_value[0].strip()] = kv_value[1].strip()
-
-    return sensor_data_dict
-
-
-def _translate_output(output):
-    """Translate the return value into a dict
-
-    :param output: output of the execution of the IPMI command
-                   (sensor reading)
-    """
-    sensors_data_dict = {}
-
-    sensors_data_array = output.split('\n\n')
-    for sensor_data in sensors_data_array:
-        sensor_data_dict = _process_sensor(sensor_data)
-        if not sensor_data_dict:
-            continue
-
-        sensor_type = _get_sensor_type(sensor_data_dict)
-
-        # ignore the sensors which have no current 'Sensor Reading' data
-        sensor_id = sensor_data_dict['Sensor ID']
-        if 'Sensor Reading' in sensor_data_dict:
-            sensors_data_dict.setdefault(sensor_type,
-                                         {})[sensor_id] = sensor_data_dict
-
-    # got nothing: no valid sensor data
-    if not sensors_data_dict:
-        raise ipmiexcept.IPMIException(_("parse IPMI sensor data failed, "
-                                         "No data retrieved from given "
-                                         "input"))
-    return sensors_data_dict
-
-
-def _parse_output(output, template):
-    """Parse the return value of the IPMI command into a dict
-
-    :param output: output of the execution of the IPMI command
-    :param template: a dict that contains the expected items of the
-                     IPMI command and their lengths.
-    """
-    ret = {}
-    index = 0
-    if not (output and template):
-        return ret
-
-    if "translate" in template:
-        ret = _translate_output(output)
-    else:
-        output_list = output.strip().replace('\n', '').split(' ')
-        if sum(template.values()) != len(output_list):
-            raise ipmiexcept.IPMIException(_("ipmitool output "
-                                             "length mismatch"))
-        for key, length in template.items():
-            index_end = index + length
-            ret[key] = output_list[index:index_end]
-            index = index_end
-    return ret
-
-
-def execute_ipmi_cmd(template=None):
-    """Decorator for the execution of an IPMI command.
-
-    It parses the output of the IPMI command into a dictionary.
-    """
-
-    template = template or {}
-
-    def _execute_ipmi_cmd(f):
-        def _execute(self, **kwargs):
-            args = ['ipmitool']
-            command = f(self, **kwargs)
-            args.extend(command.split(" "))
-            try:
-                (out, __) = utils.execute(*args, run_as_root=True)
-            except processutils.ProcessExecutionError:
-                raise ipmiexcept.IPMIException(_("running ipmitool failed"))
-            return _parse_output(out, template)
-        return _execute
-
-    return _execute_ipmi_cmd
diff --git a/ceilometer/ipmi/pollsters/__init__.py b/ceilometer/ipmi/pollsters/__init__.py
deleted file mode 100644
index 9ebbf230..00000000
--- a/ceilometer/ipmi/pollsters/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2014 Intel Corporation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Pollsters for IPMI and Intel Node Manager -""" - -from oslo_config import cfg - -OPTS = [ - cfg.IntOpt('polling_retry', - default=3, - help='Tolerance of IPMI/NM polling failures ' - 'before disable this pollster. ' - 'Negative indicates retrying forever.') -] - -cfg.CONF.register_opts(OPTS, group='ipmi') diff --git a/ceilometer/ipmi/pollsters/node.py b/ceilometer/ipmi/pollsters/node.py deleted file mode 100644 index 8540cc4c..00000000 --- a/ceilometer/ipmi/pollsters/node.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_config import cfg -from oslo_log import log -import six - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer import sample - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') -CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', - group='ipmi') - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(plugin_base.PollsterBase): - - def setup_environment(self): - super(_Base, self).setup_environment() - self.nodemanager = node_manager.NodeManager() - self.polling_failures = 0 - - # Do not load this extension if no NM support - if self.nodemanager.nm_version == 0: - raise plugin_base.ExtensionLoadError() - - @property - def default_discovery(self): - return 'local_node' - - def get_value(self, stats): - """Get value from statistics.""" - return node_manager._hex(stats["Current_value"]) - - @abc.abstractmethod - def read_data(self, cache): - """Return data sample for IPMI.""" - - def get_samples(self, manager, cache, resources): - # Only one resource for Node Manager pollster - try: - stats = self.read_data(cache) - except nmexcept.IPMIException: - self.polling_failures += 1 - LOG.warning(_('Polling %(name)s failed for %(cnt)s times!') - % ({'name': self.NAME, - 'cnt': self.polling_failures})) - if 0 <= CONF.ipmi.polling_retry < self.polling_failures: - LOG.warning(_('Pollster for %s is disabled!') % self.NAME) - raise plugin_base.PollsterPermanentError(resources) - else: - return - - self.polling_failures = 0 - - metadata = { - 'node': CONF.host - } - - if stats: - data = self.get_value(stats) - - yield sample.Sample( - name=self.NAME, - type=self.TYPE, - unit=self.UNIT, - volume=data, - user_id=None, - project_id=None, - resource_id=CONF.host, - resource_metadata=metadata) - - -class InletTemperaturePollster(_Base): - # Note(ildikov): The new meter name should be - 
# "hardware.ipmi.node.inlet_temperature". As currently there - # is no meter deprecation support in the code, we should use the - # old name in order to avoid confusion. - NAME = "hardware.ipmi.node.temperature" - TYPE = sample.TYPE_GAUGE - UNIT = "C" - - def read_data(self, cache): - return self.nodemanager.read_inlet_temperature() - - -class OutletTemperaturePollster(_Base): - NAME = "hardware.ipmi.node.outlet_temperature" - TYPE = sample.TYPE_GAUGE - UNIT = "C" - - def read_data(self, cache): - return self.nodemanager.read_outlet_temperature() - - -class PowerPollster(_Base): - NAME = "hardware.ipmi.node.power" - TYPE = sample.TYPE_GAUGE - UNIT = "W" - - def read_data(self, cache): - return self.nodemanager.read_power_all() - - -class AirflowPollster(_Base): - NAME = "hardware.ipmi.node.airflow" - TYPE = sample.TYPE_GAUGE - UNIT = "CFM" - - def read_data(self, cache): - return self.nodemanager.read_airflow() - - -class CUPSIndexPollster(_Base): - NAME = "hardware.ipmi.node.cups" - TYPE = sample.TYPE_GAUGE - UNIT = "CUPS" - - def read_data(self, cache): - return self.nodemanager.read_cups_index() - - def get_value(self, stats): - return node_manager._hex(stats["CUPS_Index"]) - - -class _CUPSUtilPollsterBase(_Base): - CACHE_KEY_CUPS = 'CUPS' - - def read_data(self, cache): - i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {}) - if not i_cache: - i_cache.update(self.nodemanager.read_cups_utilization()) - return i_cache - - -class CPUUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.cpu_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["CPU_Utilization"]) - - -class MemUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.mem_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["Mem_Utilization"]) - - -class IOUtilPollster(_CUPSUtilPollsterBase): - NAME = "hardware.ipmi.node.io_util" - TYPE = sample.TYPE_GAUGE - UNIT = "%" - - def get_value(self, stats): - return node_manager._hex(stats["IO_Utilization"]) diff --git a/ceilometer/ipmi/pollsters/sensor.py b/ceilometer/ipmi/pollsters/sensor.py deleted file mode 100644 index 249913ce..00000000 --- a/ceilometer/ipmi/pollsters/sensor.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_log import log - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _ -from ceilometer.ipmi.notifications import ironic as parser -from ceilometer.ipmi.platform import exception as ipmiexcept -from ceilometer.ipmi.platform import ipmi_sensor -from ceilometer import sample - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') -CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters', - group='ipmi') - -LOG = log.getLogger(__name__) - - -class InvalidSensorData(ValueError): - pass - - -class SensorPollster(plugin_base.PollsterBase): - METRIC = None - - def setup_environment(self): - super(SensorPollster, self).setup_environment() - self.ipmi = ipmi_sensor.IPMISensor() - self.polling_failures = 0 - - # Do not load this extension if no IPMI support - if not self.ipmi.ipmi_support: - raise plugin_base.ExtensionLoadError() - - @property - def default_discovery(self): - return 'local_node' - - @staticmethod - def _get_sensor_types(data, sensor_type): - try: - return (sensor_type_data for _, sensor_type_data - in data[sensor_type].items()) - except KeyError: - return [] - - def get_samples(self, manager, cache, resources): - # Only one resource for IPMI pollster - try: - stats = self.ipmi.read_sensor_any(self.METRIC) - except ipmiexcept.IPMIException: - self.polling_failures += 1 - LOG.warning(_( - 'Polling %(mtr)s sensor failed for %(cnt)s times!') - % ({'mtr': self.METRIC, - 'cnt': self.polling_failures})) - if 0 <= CONF.ipmi.polling_retry < self.polling_failures: - LOG.warning(_('Pollster for %s is disabled!') % self.METRIC) - raise plugin_base.PollsterPermanentError(resources) - else: - return - - self.polling_failures = 0 - - sensor_type_data = self._get_sensor_types(stats, self.METRIC) - - for sensor_data in sensor_type_data: - # Continue if sensor_data is not parseable. - try: - sensor_reading = sensor_data['Sensor Reading'] - sensor_id = sensor_data['Sensor ID'] - except KeyError: - continue - - if not parser.validate_reading(sensor_reading): - continue - - try: - volume, unit = parser.parse_reading(sensor_reading) - except parser.InvalidSensorData: - continue - - resource_id = '%(host)s-%(sensor-id)s' % { - 'host': CONF.host, - 'sensor-id': parser.transform_id(sensor_id) - } - - metadata = { - 'node': CONF.host - } - - yield sample.Sample( - name='hardware.ipmi.%s' % self.METRIC.lower(), - type=sample.TYPE_GAUGE, - unit=unit, - volume=volume, - user_id=None, - project_id=None, - resource_id=resource_id, - resource_metadata=metadata) - - -class TemperatureSensorPollster(SensorPollster): - METRIC = 'Temperature' - - -class CurrentSensorPollster(SensorPollster): - METRIC = 'Current' - - -class FanSensorPollster(SensorPollster): - METRIC = 'Fan' - - -class VoltageSensorPollster(SensorPollster): - METRIC = 'Voltage' diff --git a/ceilometer/keystone_client.py b/ceilometer/keystone_client.py deleted file mode 100644 index 7731176e..00000000 --- a/ceilometer/keystone_client.py +++ /dev/null @@ -1,78 +0,0 @@ -# -# Copyright 2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from keystoneauth1 import loading as ka_loading -from keystoneclient.v3 import client as ks_client_v3 -from oslo_config import cfg -from oslo_log import log - -LOG = log.getLogger(__name__) - -CFG_GROUP = "service_credentials" - - -def get_session(requests_session=None): - """Get a ceilometer service credentials auth session.""" - auth_plugin = ka_loading.load_auth_from_conf_options(cfg.CONF, CFG_GROUP) - session = ka_loading.load_session_from_conf_options( - cfg.CONF, CFG_GROUP, auth=auth_plugin, session=requests_session - ) - return session - - -def get_client(trust_id=None, requests_session=None): - """Return a client for keystone v3 endpoint, optionally using a trust.""" - session = get_session(requests_session=requests_session) - return ks_client_v3.Client(session=session, trust_id=trust_id) - - -def get_service_catalog(client): - return client.session.auth.get_access(client.session).service_catalog - - -def get_auth_token(client): - return client.session.auth.get_access(client.session).auth_token - - -CLI_OPTS = [ - cfg.StrOpt('region-name', - deprecated_group="DEFAULT", - deprecated_name="os-region-name", - default=os.environ.get('OS_REGION_NAME'), - help='Region name to use for OpenStack service endpoints.'), - cfg.StrOpt('interface', - default=os.environ.get( - 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', - 'public')), - deprecated_name="os-endpoint-type", - choices=('public', 'internal', 'admin', 'auth', 'publicURL', - 'internalURL', 'adminURL'), - help='Type of endpoint in Identity service catalog to use for ' - 'communication with OpenStack services.'), -] - -cfg.CONF.register_cli_opts(CLI_OPTS, group=CFG_GROUP) - - -def register_keystoneauth_opts(conf): - ka_loading.register_auth_conf_options(conf, CFG_GROUP) - ka_loading.register_session_conf_options( - conf, CFG_GROUP, - deprecated_opts={'cacert': [ - cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), - cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] - }) diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index 09ccf25e..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,138 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Monika Wolf , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-03 03:36+0000\n" -"Last-Translator: Monika Wolf \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "Inspector %(name)s kann nicht geladen werden: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "" -"Die Verwendung des residenten Speichers für %(id)s konnte nicht abgerufen " -"werden: %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "" -"Dispatcher konnte %s nicht verarbeiten. Erneut in Warteschlange stellen." - -msgid "Error connecting to coordination backend." -msgstr "Fehler beim Herstellen einer Verbindung zum Koordinierungs-Back-End." 
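A usage sketch for the keystone_client helpers above, assuming keystoneauth1 and oslo.config are installed and that [service_credentials] is filled in, for example from a ceilometer.conf; with nothing configured this still runs and simply yields an unauthenticated session:

from keystoneauth1 import loading as ka_loading
from oslo_config import cfg

CFG_GROUP = 'service_credentials'

conf = cfg.ConfigOpts()
ka_loading.register_auth_conf_options(conf, CFG_GROUP)
ka_loading.register_session_conf_options(conf, CFG_GROUP)
conf([], project='example')  # would normally read the service config file

auth = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP)
session = ka_loading.load_session_from_conf_options(conf, CFG_GROUP,
                                                    auth=auth)
print(session)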
- -msgid "Error getting group membership info from coordination backend." -msgstr "" -"Fehler beim Abrufen von Mitgliedschaftsinformationen vom Koordinierungs-Back-" -"End." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "" -"Fehler beim Beitreten zur Partitionierungsgruppe %s. Operation wird " -"wiederholt." - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "Fehler beim Verarbeiten des Ereignisses und es wird gelöscht: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "" -"Fehler beim Senden eines Überwachungssignals an das Koordinierungs-Back-End." - -msgid "Fail to process a notification" -msgstr "Eine Benachrichtigung konnte nicht verarbeitet werden." - -msgid "Fail to process notification" -msgstr "Benachrichtigung konnte nicht verarbeitet werden." - -msgid "Failed to connect to Gnocchi." -msgstr "Fehler beim Herstellen einer Verbindung zu Gnocchi." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "Fehler beim Herstellen einer Verbindung zum Kafka-Service: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "" -"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " -"Später erneut versuchen: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "" -"Fehler beim Herstellen einer Verbindung zur Datenbank. Zweck: %(purpose)s " -"Später erneut versuchen: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "Fehler beim Laden der Ressource aufgrund des folgenden Fehlers %s" - -#, python-format -msgid "Failed to record event: %s" -msgstr "Das Ereignis konnte nicht aufgezeichnet werden: %s" - -#, python-format -msgid "Invalid type %s specified" -msgstr "Ungültigen Typ %s angegeben" - -#, python-format -msgid "Missing field %s" -msgstr "Fehlendes Feld %s" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "" -"Das übergebene Ressourcenwörterverzeichnis muss die Schlüssel für " -"resource_id und resource_url enthalten." - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "Erforderliches Feld %(field)s muss %(type)s sein." - -#, python-format -msgid "Required field %s not specified" -msgstr "Erforderliches Feld %s nicht angegeben." - -#, python-format -msgid "Required fields %s not specified" -msgstr "Erforderliche Felder %s nicht angegeben." - -#, python-format -msgid "Skip invalid resource %s" -msgstr "Ungültige Ressource %s überspringen" - -#, python-format -msgid "Skipping %(name)s, keystone issue: %(exc)s" -msgstr "%(name)s wird übersprungen, Keystone-Problem: %(exc)s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "Statuscode: %{code}s. 
Fehler beim Versenden des Ereignisses: %{event}s" - -#, python-format -msgid "Unable to load changed event pipeline: %s" -msgstr "Die geänderte Ereignispipeline konnte nicht geladen werden: %s" - -#, python-format -msgid "Unable to load changed pipeline: %s" -msgstr "Die geänderte Pipeline konnte nicht geladen werden: %s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "Nicht erkannter Typwert %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "Inspector-Aufruf fehlgeschlagen für %(ident)s Host %(host)s: %(err)s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index e40dedff..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,145 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Frank Kloeker , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-06-06 06:20+0000\n" -"Last-Translator: Andreas Jaeger \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "%d Ereignisse aus Datenbank entfernt" - -#, python-format -msgid "%d samples removed from database" -msgstr "%d Beispiele aus Datenbank entfernt" - -msgid "Configuration:" -msgstr "Konfiguration:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "Verbindung mit %(db)s auf %(nodelist)s wird hergestellt" - -msgid "Coordination backend started successfully." -msgstr "Das Koordinierungs-Back-End wurde erfolgreich gestartet." - -#, python-format -msgid "Definitions: %s" -msgstr "Definitionen: %s" - -msgid "Detected change in pipeline configuration." -msgstr "Es wurde eine Änderung in der Pipelinekonfiguration festgestellt." - -#, python-format -msgid "Dropping event data with TTL %d" -msgstr "Löschen von Ereignisdaten mit TTL %d" - -#, python-format -msgid "Dropping metering data with TTL %d" -msgstr "Löschen von Messdaten mit TTL %d" - -#, python-format -msgid "Duplicate event detected, skipping it: %s" -msgstr "Doppeltes Ereignis erkannt. Wird übersprungen: %s" - -msgid "Expired residual resource and meter definition data" -msgstr "Abgelaufene Daten für residente Ressource und für Messdefinition" - -#, python-format -msgid "Index %s will be recreate." -msgstr "Index %s wird erneut erstellt. " - -#, python-format -msgid "Joined partitioning group %s" -msgstr "Partitionierungsgruppe %s beigetreten." - -#, python-format -msgid "Left partitioning group %s" -msgstr "Partitionierungsgruppe %s verlassen." - -#, python-format -msgid "No limit value provided, result set will be limited to %(limit)d." -msgstr "" -"Es wurde kein Grenzwert angegeben. Der Ergebnissatz wird auf %(limit)d " -"beschränkt." - -msgid "Nothing to clean, database event time to live is disabled" -msgstr "" -"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankereignisdaten ist " -"deaktiviert." - -msgid "Nothing to clean, database metering time to live is disabled" -msgstr "" -"Nichts zu bereinigen. Die Lebensdauer (TTL) der Datenbankstichprobendaten " -"ist deaktiviert." 
- -#, python-format -msgid "" -"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " -"%(param)s" -msgstr "" -"Pipeline %(pipeline)s: Konfiguration von Transformerinstanz %(name)s mit " -"Parameter %(param)s" - -#, python-format -msgid "Pipeline config: %s" -msgstr "Pipelinekonfiguration: %s" - -msgid "Pipeline configuration file has been updated." -msgstr "Die Pipelinekonfigurationsdatei wurde aktualisiert." - -#, python-format -msgid "Polling pollster %(poll)s in the context of %(src)s" -msgstr "Abfrage von Pollster %(poll)s im Kontext von %(src)s" - -#, python-format -msgid "Publishing policy set to %s" -msgstr "Veröffentlichungsrichtlinie auf %s gesetzt" - -msgid "Reconfiguring polling tasks." -msgstr "Polling-Tasks werden neu konfiguriert." - -msgid "Reloading notification agent and listeners." -msgstr "Benachrichtigungsagent und Listener werden erneut geladen." - -#, python-format -msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" -msgstr "" -"Pollster %(name)s überspringen, keine %(p_context)sressourcen in diesem " -"Zyklus gefunden." - -#, python-format -msgid "Starting server in PID %s" -msgstr "Starten von Server in PID %s" - -#, python-format -msgid "Swift endpoint not found: %s" -msgstr "Swift-Endpunkt konnte nicht gefunden werden: %s" - -msgid "detected decoupled pipeline config format" -msgstr "entkoppeltes Pipeline-Konfigurationsformat erkannt" - -#, python-format -msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" -msgstr "" -"Messung von Daten %(counter_name)s für %(resource_id)s: %(counter_volume)s" - -#, python-format -msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" -msgstr "" -"Bereitstellung auf 0.0.0.0:%(sport)s, Ansicht unter http://127.0.0.1:" -"%(vport)s" - -#, python-format -msgid "serving on http://%(host)s:%(port)s" -msgstr "Bereitstellung auf http://%(host)s:%(port)s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po deleted file mode 100644 index 5b444022..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer-log-warning.po +++ /dev/null @@ -1,125 +0,0 @@ -# Monika Wolf , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-04 10:08+0000\n" -"Last-Translator: Monika Wolf \n" -"Language-Team: German\n" -"Language: de\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -msgid "" -"Cannot extract tasks because agent failed to join group properly. Rejoining " -"group." -msgstr "" -"Extrahieren der Tasks nicht möglich, da der Agent nicht ordnungsgemäß in die " -"Gruppe eingebunden werden konnte. Operation zum Wiedereinbinden in die " -"Gruppe wird durchgeführt." - -#, python-format -msgid "" -"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " -"%(exc)s" -msgstr "" -"Die %(pollster)s-Daten für %(instance_id)s können nicht untersucht werden. 
" -"Behebbare Ursache: %(exc)s" - -#, python-format -msgid "Dropping out of time order sample: %s" -msgstr "" -"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" - -#, python-format -msgid "Dropping sample with no predecessor: %s" -msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" - -#, python-format -msgid "Failed to load any dispatchers for %s" -msgstr "Es konnten keine Dispatcher für %s geladen werden." - -#, python-format -msgid "Ignore unrecognized field %s" -msgstr "Nicht erkanntes Feld %s ignorieren" - -#, python-format -msgid "Invalid status, skipping IP address %s" -msgstr "Ungültiger Status. IP-Adresse %s wird übersprungen." - -msgid "Negative delta detected, dropping value" -msgstr "Negatives Delta erkannt. Wert wird verworfen." - -#, python-format -msgid "No endpoints found for service %s" -msgstr "Es wurden keine Endpunkte für den Service %s gefunden." - -msgid "" -"Non-metric meters may be collected. It is highly advisable to disable these " -"meters using ceilometer.conf or the pipeline.yaml" -msgstr "" -"Es werden möglicherweise nicht metrische Daten erfasst. Es wird dringend " -"empfohlen, diese Zähler über die Datei ceilometer.conf oder pipeline.yaml zu " -"inaktivieren." - -#, python-format -msgid "" -"Skipping %(name)s, %(service_type)s service is not registered in keystone" -msgstr "" -"%(name)s wird übersprungen. Der Service %(service_type)s ist nicht in " -"Keystone registriert." - -#, python-format -msgid "Skipping duplicate meter definition %s" -msgstr "Doppelte Messdefinition %s wird übersprungen." - -msgid "" -"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " -"aodh endpoint." -msgstr "" -"Die ceilometer-api wurde mit aktiviertem aodh gestartet. Alarm-URLs werden " -"an den aodh-Endpunkt umgeleitet. " - -msgid "" -"ceilometer-api started with gnocchi enabled. The resources/meters/samples " -"URLs are disabled." -msgstr "" -"Die ceilometer-api wurde mit aktiviertem Gnocchi gestartet. Die URLs für " -"resources/meters/samples sind inaktiviert." - -#, python-format -msgid "event signature invalid, discarding event: %s" -msgstr "Ereignissignatur ungültig. Ereignis wird verworfen: %s" - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " -"volume (volume: None), the sample will be dropped" -msgstr "" -"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " -"enthält keinen Datenträger (volume: None). Die Stichprobe wird gelöscht." - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " -"volume which is not a number (volume: %(counter_volume)s), the sample will " -"be dropped" -msgstr "" -"Die Messung von Daten %(counter_name)s für %(resource_id)s @ %(timestamp)s " -"enthält einen Datenträger ohne Zahl (volume: %(counter_volume)s). Die " -"Stichprobe wird gelöscht." - -msgid "" -"pecan_debug cannot be enabled, if workers is > 1, the value is overrided " -"with False" -msgstr "" -"pecan_debug kann nicht aktiviert werden, wenn Worker > 1 ist. Der Wert wird " -"mit False überschrieben." - -#, python-format -msgid "unable to configure oslo_cache: %s" -msgstr "Konfigurieren von oslo_cache nicht möglich: %s" diff --git a/ceilometer/locale/de/LC_MESSAGES/ceilometer.po b/ceilometer/locale/de/LC_MESSAGES/ceilometer.po deleted file mode 100644 index aad2eaf4..00000000 --- a/ceilometer/locale/de/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,522 +0,0 @@ -# Translations template for ceilometer. 
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the ceilometer project.
-#
-# Translators:
-# Carsten Duch , 2014
-# Christian Berendt , 2014
-# Ettore Atalan , 2014
-# Andreas Jaeger , 2016. #zanata
-# Frank Kloeker , 2016. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: ceilometer 6.0.1.dev170\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-07 17:37+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-06-06 06:19+0000\n"
-"Last-Translator: Andreas Jaeger \n"
-"Language: de\n"
-"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-"Language-Team: German\n"
-
-#, python-format
-msgid "%(entity)s %(id)s Not Found"
-msgstr "%(entity)s %(id)s nicht gefunden"
-
-#, python-format
-msgid "Arithmetic transformer must use at least one meter in expression '%s'"
-msgstr ""
-"Arithmetiktransformer muss mindestens eine Messgröße im Ausdruck '%s' "
-"verwenden"
-
-#, python-format
-msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
-msgstr ""
-"Tabelle %(table_name)s kann nicht erstellt werden, da sie bereits vorhanden "
-"ist. Fehler wird ignoriert"
-
-#, python-format
-msgid "Continue after error from %(name)s: %(error)s"
-msgstr "Fortfahren nach Fehler von %(name)s: %(error)s"
-
-#, python-format
-msgid "Could not connect slave host: %s "
-msgstr ""
-"Es konnte keine Verbindung zum untergeordneten Host hergestellt werden: %s "
-
-#, python-format
-msgid "Could not connect to XenAPI: %s"
-msgstr "Es konnte keine Verbindung zu XenAPI hergestellt werden: %s"
-
-#, python-format
-msgid "Could not get CPU Util for %(id)s: %(e)s"
-msgstr "Abruf von CPU-Auslastung nicht möglich für %(id)s: %(e)s"
-
-#, python-format
-msgid "Could not get Memory Usage for %(id)s: %(e)s"
-msgstr "Abruf von Speicherbelegung nicht möglich für %(id)s: %(e)s"
-
-#, python-format
-msgid "Could not get VM %s CPU Utilization"
-msgstr "CPU-Auslastung für VM %s konnte nicht abgerufen werden"
-
-#, python-format
-msgid "Couldn't obtain IP address of instance %s"
-msgstr "IP-Adresse von Instanz %s konnte nicht abgerufen werden"
-
-#, python-format
-msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
-msgstr "Löschen von Benachrichtigung %(type)s (UUID:%(msgid)s)"
-
-#, python-format
-msgid ""
-"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
-"[Error Code %(error_code)s] %(ex)s"
-msgstr ""
-"Fehler von libvirt während Suche nach Instanz <name=%(name)s, id=%(id)s>: "
-"[Fehlercode %(error_code)s] %(ex)s"
-
-#, python-format
-msgid "Error parsing HTTP response: %s"
-msgstr "Fehler bei Auswertung der HTTP-Antwort %s"
-
-msgid "Error stopping pollster."
-msgstr "Fehler beim Stoppen des Pollster."
-
-msgid "Event"
-msgstr "Ereignis"
-
-msgid "Expression evaluated to a NaN value!"
-msgstr "Ausdruck ergab einen NaN-Wert!"
-
-#, python-format
-msgid "Failed to import extension for %(name)s: %(error)s"
-msgstr "Fehler beim Importieren der Erweiterung für %(name)s: %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
-"is SHUTOFF."
-msgstr ""
-"Fehler beim Überprüfen von Daten der Instanz <name=%(name)s, id=%(id)s>, "
-"Domänenstatus ist ABGESCHALTET."
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
-"libvirt: %(error)s"
-msgstr ""
-"Fehler beim Überprüfen der Speicherbelegung von %(instance_uuid)s, "
-"Informationen können nicht von libvirt abgerufen werden: %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
-"not get info from libvirt."
-msgstr ""
-"Fehler beim Überprüfen der Speicherbelegung von Instanz <name=%(name)s, id=%(id)s>, Informationen können nicht von libvirt abgerufen werden."
-
-#, python-format
-msgid "Failed to load any notification handlers for %s"
-msgstr "Es konnten keine Benachrichtigungshandler für %s geladen werden"
-
-#, python-format
-msgid "Failed to parse the timestamp value %s"
-msgstr "Zeitmarkenwert %s konnte nicht analysiert werden"
-
-#, python-format
-msgid "Failed to publish %d datapoints, dropping them"
-msgstr "%d Datenpunkte konnten nicht veröffentlicht werden; werden gelöscht"
-
-#, python-format
-msgid "Failed to publish %d datapoints, queue them"
-msgstr ""
-"%d Datenpunkte konnten nicht veröffentlicht werden; in Warteschlange "
-"einreihen"
-
-#, python-format
-msgid "Filter expression not valid: %s"
-msgstr "Filterausdruck nicht gültig: %s"
-
-#, python-format
-msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
-msgstr "Instanz %(name)s (%(instance_id)s) wird ignoriert: %(error)s"
-
-#, python-format
-msgid "Ignoring instance %(name)s: %(error)s"
-msgstr "Instanz %(name)s wird ignoriert: %(error)s"
-
-#, python-format
-msgid "Ignoring loadbalancer %(loadbalancer_id)s"
-msgstr "Loadbalancer %(loadbalancer_id)s wird ignoriert."
-
-#, python-format
-msgid "Ignoring pool %(pool_id)s"
-msgstr "Pool %(pool_id)s wird ignoriert."
-
-#, python-format
-msgid ""
-"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
-"%(column)s."
-msgstr ""
-"Ungültige YAML-Syntax in Definitionsdatei %(file)s in Zeile: %(line)s, "
-"Spalte: %(column)s."
-
-#, python-format
-msgid "Invalid aggregation function: %s"
-msgstr "Ungültige Aggreation Funktion: %s"
-
-#, python-format
-msgid "Invalid period %(period)s: %(err)s"
-msgstr "Ungültiger Zeitraum %(period)s: %(err)s"
-
-#, python-format
-msgid "Invalid trait type '%(type)s' for trait %(trait)s"
-msgstr "Ungültiger Traittyp '%(type)s' für Trait %(trait)s"
-
-msgid "Limit must be positive"
-msgstr "Grenzwert muss positiv sein"
-
-#, python-format
-msgid "More than one event with id %s returned from storage driver"
-msgstr "Mehr als ein Ereignis mit der ID %s vom Speichertreiber zurückgegeben"
-
-#, python-format
-msgid "Multiple VM %s found in XenServer"
-msgstr "Mehrere VMs %s in XenServer gefunden"
-
-msgid "Must specify connection_url, and connection_password to use"
-msgstr ""
-"Angabe von connection_url und connection_password für die Verwendung "
-"erforderlich"
-
-#, python-format
-msgid "No plugin named %(plugin)s available for %(name)s"
-msgstr "Kein Plug-in mit dem Namen %(plugin)s verfügbar für %(name)s."
- -msgid "Node Manager init failed" -msgstr "Initialisierung von Knoten-Manager fehlgeschlagen" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Nicht berechtigt für den Zugriff auf %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight-API hat Folgendes zurückgegeben: %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail-API hat Folgendes zurückgegeben: %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Operator %(operator)s wird nicht unterstützt. Für das Feld %(field)s ist " -"nur der Gleichheitsoperator verfügbar." - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Operator %(operator)s wird nicht unterstützt. Unterstützte Operatoren: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Ausdruck für 'Sortieren nach' nicht gültig: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Analysefehler in JSONPath-Spezifikation '%(jsonpath)s' für %(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "Zeitraum muss positiv sein." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s nach Fehler von Publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Fortsetzen nach Fehler von Publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"Pipeline %(pipeline)s: Fehler bei Flushoperation für Transformer %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Beendigung nach Fehler von Transformer %(trans)s für " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in angegeben, aber kein Plug-in-Name für %s angegeben." - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "Polling von %(mtr)s-Sensor %(cnt)s Mal fehlgeschlagen!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Polling von %(name)s %(cnt)s Mal fehlgeschlagen!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Pollster für %s ist inaktiviert!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Verhindern Sie, dass Pollster %(name)s Quelle %(source)s weiterhin abfragt!" 
-
-#, python-format
-msgid ""
-"Publisher max local_queue length is exceeded, dropping %d oldest samples"
-msgstr ""
-"Maximale Länge von local_queue für Publisher ist überschritten, die %d "
-"ältesten Beispiele werden gelöscht"
-
-#, python-format
-msgid "Publishing policy is unknown (%s) force to default"
-msgstr ""
-"Veröffentlichungsrichtlinie ist unbekannt (%s); auf Standardeinstellung "
-"setzen"
-
-#, python-format
-msgid "RGW AdminOps API returned %(status)s %(reason)s"
-msgstr "RGW-AdminOps-API hat Folgendes zurückgegeben: %(status)s %(reason)s"
-
-msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
-msgstr ""
-"Anforderung konnte keine Verbindung mit OpenDaylight über NorthBound REST-"
-"API herstellen"
-
-#, python-format
-msgid "Required field %s not specified"
-msgstr "Erforderliches Feld %s nicht angegeben"
-
-msgid "Resource"
-msgstr "Resource"
-
-msgid "Sample"
-msgstr "Beispiel"
-
-msgid "Samples should be included in request body"
-msgstr "Beispiele sollten in Anforderungshauptteil enthalten sein"
-
-#, python-format
-msgid "Skip loading extension for %s"
-msgstr "Laden der Ausnahme für %s überspringen"
-
-#, python-format
-msgid "String %s is not a valid isotime"
-msgstr "Zeichenfolge %s ist kein gültiger Wert für 'isotime'"
-
-msgid ""
-"The Yaml file that defines mapping between samples and gnocchi resources/"
-"metrics"
-msgstr ""
-"Die YAML-Datei mit der Definition der Zuordnung zwischen Beispielen und "
-"gnocchi-Ressourcen/Metriken"
-
-#, python-format
-msgid ""
-"The data type %(type)s is not supported. The supported data type list is: "
-"%(supported)s"
-msgstr ""
-"Der Datentyp %(type)s wird nicht unterstützt. Die Liste der unterstützten "
-"Datentypen lautet: %(supported)s"
-
-#, python-format
-msgid "The field 'fields' is required for %s"
-msgstr "Das Feld 'fields' ist erforderlich für %s"
-
-msgid "The path for the file publisher is required"
-msgstr "Der Pfad für den Datei-Publisher ist erforderlich"
-
-#, python-format
-msgid "UDP: Cannot decode data sent by %s"
-msgstr "UPD: Von %s gesendete Daten konnten nicht dekodiert werden"
-
-msgid "UDP: Unable to store meter"
-msgstr "UDP: Messgröße kann nicht gespeichert werden"
-
-#, python-format
-msgid "Unable to connect to the database server: %(errmsg)s."
-msgstr ""
-"Es kann keine Verbindung zum Datenbankserver hergestellt werden: %(errmsg)s."
-
-#, python-format
-msgid ""
-"Unable to convert the value %(value)s to the expected data type %(type)s."
-msgstr ""
-"Wert %(value)s kann nicht in den erwarteten Datentyp %(type)s umgewandelt "
-"werden."
-
-#, python-format
-msgid "Unable to discover resources: %s"
-msgstr "Ressourcen können nicht gefunden werden: %s"
-
-#, python-format
-msgid "Unable to evaluate expression %(expr)s: %(exc)s"
-msgstr "Auswertung nicht möglich für Ausdruck %(expr)s: %(exc)s"
-
-#, python-format
-msgid "Unable to load publisher %s"
-msgstr "Publisher %s kann nicht geladen werden"
-
-#, python-format
-msgid "Unable to load the hypervisor inspector: %s"
-msgstr "Hypervisorinspector %s kann nicht geladen werden"
-
-#, python-format
-msgid ""
-"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
-"up."
-msgstr ""
-"Es kann keine erneute Verbindung zur primären mongodb nach %(retries)d "
-"Versuchen hergestellt werden. Abbruch."
-
-#, python-format
-msgid ""
-"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
-"%(retry_interval)d seconds."
-msgstr "" -"Es kann keine erneute Verbindung zur primären mongodb hergestellt werden: " -"%(errmsg)s. Erneuter Versuch in %(retry_interval)d Sekunden." - -msgid "Unable to send sample over UDP" -msgstr "Beispiel kann nicht über UDP gesendet werden" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Unerwartete Ausnahme beim Konvertieren von %(value)s in den erwarteten " -"Datentyp %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Unbekannte Erkennungserweiterung: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Unbekannter Metadatentyp. Schlüssel (%s) wird nicht abfragbar sein." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Loadbalancer %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Firewall %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Listener %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Mitglied %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für Pool %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für VIP %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Unbekannten Status %(stat)s erhalten für VPN %(id)s; Beispiel wird " -"übersprungen" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s in VMware vSphere nicht gefunden" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s in XenServer nicht gefunden" - -msgid "Wrong sensor type" -msgstr "Falscher Sensortyp" - -msgid "XenAPI not installed" -msgstr "XenAPI nicht installiert" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "YAML-Fehler beim Lesen von Definitionsdatei %(file)s." - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Alarm-URLs sind nicht verfügbar, wenn Aodh inaktiviert oder nicht verfügbar " -"ist." - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "Abruf von CPU-Zeit nicht möglich für %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "" -"Wenn Gnocci aktiviert ist, kann die Option 'direct' nicht den Wert 'true' " -"haben. " - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "" -"Löschen des nicht in die zeitliche Reihenfolge gehörenden Beispiels: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "Beispiel ohne Vorgänger wird gelöscht: %s" - -msgid "ipmitool output length mismatch" -msgstr "Abweichung bei ipmitool-Ausgabelänge" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes und backup_count sollten Zahlen sein." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"Analyse von IPMI-Sensordaten fehlgeschlagen, keine Daten von angegebener " -"Eingabe abgerufen" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "Analyse von IPMI-Sensordaten fehlgeschlagen, unbekannter Sensortyp" - -msgid "running ipmitool failure" -msgstr "Fehler beim Ausführen von ipmitool" diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index fa38d329..00000000 --- a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,132 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Eugènia Torrella , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 11:52+0000\n" -"Last-Translator: Eugènia Torrella \n" -"Language-Team: Spanish\n" -"Language: es\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "No se ha podido cargar el inspector %(name)s: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "No se ha podido obtener el uso de memoria residente para %(id)s: %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "El asignador no ha podido manejar el %s, vuelva a ponerlo en la cola." - -msgid "Error connecting to coordination backend." -msgstr "Error de conexión con el servidor coordinador." - -msgid "Error getting group membership info from coordination backend." -msgstr "" -"Error al obtener información de pertenencia a grupos del servidor " -"coordinador." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "Error al unirse al grupo de partición %s, se está reintentando" - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "Se ha producido un error al procesar el suceso y se descartará: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "Error al enviar una señal de latido al servidor coordinador." - -msgid "Fail to process a notification" -msgstr "Error al procesar una notificación" - -msgid "Fail to process notification" -msgstr "No se ha podido procesar la notificación" - -msgid "Failed to connect to Gnocchi." -msgstr "No se ha podido conectar con Gnocchi." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "No se ha podido conectar con el servicio Kafka: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "" -"No se ha podido establecer conexión con la base de datos con el propósito " -"%(purpose)s. Vuelva a intentarlo más tarde: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "" -"No se ha podido establecer conexión con la base de datos con el propósito " -"%(purpose)s. 
Vuelva a intentarlo más tarde: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "No se ha podido cargar el recurso debido a un error: %s" - -#, python-format -msgid "Failed to record event: %s" -msgstr "No se ha podido registrar el suceso: %s" - -msgid "Failed to retry to send sample data with max_retry times" -msgstr "" -"No se ha podido volver a intentar enviar datos de ejemplo max_retry veces" - -msgid "" -"Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " -"not part of group and cannot take tasks" -msgstr "" -"ID de grupo: %{group_id}s, Miembros: %{members}s, Yo: %{me}s: El agente " -"actual no forma parte del grupo y no puede coger tareas" - -#, python-format -msgid "Invalid type %s specified" -msgstr "Se ha especificado un tipo no válido: %s" - -#, python-format -msgid "Missing field %s" -msgstr "Falta el campo %s" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "" -"El dicionario de recursos que se pase debe contener las claves resource_id y " -"resource_url" - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "El campo obligatorio %(field)s s debería ser un %(type)s" - -#, python-format -msgid "Required field %s not specified" -msgstr "No se ha especificado el campo obligatorio %s" - -#, python-format -msgid "Required fields %s not specified" -msgstr "No se han especificado los campos obligatorios %s" - -#, python-format -msgid "Skip invalid resource %s" -msgstr "Omitir el recurso no válido %s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "" -"Código de estado: %{code}s. No se ha podido asignar el suceso: %{event}s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "Valor de tipo no reconocido %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "Error en la llamada al inspector del host %(ident)s %(host)s: %(err)s" diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index 5a8e6a4d..00000000 --- a/ceilometer/locale/es/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,139 +0,0 @@ -# Eugènia Torrella , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev57\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-18 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-18 02:45+0000\n" -"Last-Translator: Eugènia Torrella \n" -"Language-Team: Spanish\n" -"Language: es\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1)\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "Se han eliminado %d sucesos de la base de datos" - -#, python-format -msgid "%d samples removed from database" -msgstr "Se han eliminado %d ejemplos de la base de datos" - -msgid "Configuration:" -msgstr "Configuración:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "Se está estableciendo conexión con %(db)s en %(nodelist)s" - -msgid "Coordination backend started successfully." -msgstr "El servidor coordinador se ha iniciado satisfactoriamente." - -#, python-format -msgid "Definitions: %s" -msgstr "Definiciones: %s" - -msgid "Detected change in pipeline configuration." -msgstr "Se ha detectado un cambio en la configuración de la interconexión." 
-
-#, python-format
-msgid "Dropping event data with TTL %d"
-msgstr "Descartando datos de sucesos con TTL %d"
-
-#, python-format
-msgid "Dropping metering data with TTL %d"
-msgstr "Descartando datos de calibración con TTL %d"
-
-#, python-format
-msgid "Duplicate event detected, skipping it: %s"
-msgstr "Se ha detectado un suceso duplicado, se omitirá: %s"
-
-msgid "Expired residual resource and meter definition data"
-msgstr "El recurso residual y los datos de definición del medidor han caducado"
-
-#, python-format
-msgid "Index %s will be recreate."
-msgstr "Se volverá a crear el índice %s."
-
-#, python-format
-msgid "Joined partitioning group %s"
-msgstr "Se ha unido al grupo de partición %s"
-
-#, python-format
-msgid "Left partitioning group %s"
-msgstr "Ha dejado el grupo de partición %s"
-
-#, python-format
-msgid "No limit value provided, result set will be limited to %(limit)d."
-msgstr ""
-"No se ha proporcionado ningún valor límite, el conjunto de resultados estará "
-"limitado a %(limit)d."
-
-msgid "Nothing to clean, database event time to live is disabled"
-msgstr ""
-"No hay nada que limpiar, el tiempo de vida de sucesos de base de datos está "
-"inhabilitado"
-
-msgid "Nothing to clean, database metering time to live is disabled"
-msgstr ""
-"No hay nada que limpiar, el tiempo de vida de medición de base de datos está "
-"inhabilitado"
-
-#, python-format
-msgid ""
-"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter "
-"%(param)s"
-msgstr ""
-"Interconexión %(pipeline)s: Configure la instancia de transformador %(name)s "
-"con el parámetro %(param)s"
-
-#, python-format
-msgid "Pipeline config: %s"
-msgstr "Configuración de interconexión: %s"
-
-msgid "Pipeline configuration file has been updated."
-msgstr "Se ha actualizado el archivo de configuración de la interconexión."
-
-#, python-format
-msgid "Polling pollster %(poll)s in the context of %(src)s"
-msgstr "Sondeando pollster %(poll)s en el contexto de %(src)s"
-
-#, python-format
-msgid "Publishing policy set to %s"
-msgstr "Política de publicación establecida en %s"
-
-msgid "Reconfiguring polling tasks."
-msgstr "Reconfigurando las tareas de sondeo."
-
-msgid "Reloading notification agent and listeners."
-msgstr "Recargando la notificación, el agente y los escuchas."
-
-#, python-format
-msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle"
-msgstr ""
-"Omitir pollster %(name)s, ningún recurso de %(p_context)s ha encontrado "
-"este ciclo"
-
-#, python-format
-msgid "Starting server in PID %s"
-msgstr "Iniciando servidor en PID %s"
-
-msgid "detected decoupled pipeline config format"
-msgstr ""
-"se ha detectado un formato de configuración de interconexión desacoplado"
-
-#, python-format
-msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s"
-msgstr ""
-"datos de medición %(counter_name)s para %(resource_id)s: %(counter_volume)s"
-
-#, python-format
-msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s"
-msgstr "sirviendo en 0.0.0.0:%(sport)s, vista en http://127.0.0.1:%(vport)s"
-
-#, python-format
-msgid "serving on http://%(host)s:%(port)s"
-msgstr "sirviendo en http://%(host)s:%(port)s"
diff --git a/ceilometer/locale/es/LC_MESSAGES/ceilometer.po b/ceilometer/locale/es/LC_MESSAGES/ceilometer.po
deleted file mode 100644
index a071f451..00000000
--- a/ceilometer/locale/es/LC_MESSAGES/ceilometer.po
+++ /dev/null
@@ -1,511 +0,0 @@
-# Translations template for ceilometer.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the ceilometer project.
-#
-# Translators:
-# Rafael Rivero , 2015
-# Andreas Jaeger , 2016. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: ceilometer 6.0.1.dev170\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-07 17:37+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-03-24 11:03+0000\n"
-"Last-Translator: Eugènia Torrella \n"
-"Language: es\n"
-"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-"Language-Team: Spanish\n"
-
-#, python-format
-msgid "%(entity)s %(id)s Not Found"
-msgstr "%(entity)s %(id)s No encontrado"
-
-#, python-format
-msgid "Arithmetic transformer must use at least one meter in expression '%s'"
-msgstr ""
-"El transformador aritmético debe utilizar al menos un medidor en la "
-"expresión '%s'"
-
-#, python-format
-msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
-msgstr ""
-"No se puede crear la tabla %(table_name)s, ya existe. Se ignorará el error."
-
-#, python-format
-msgid "Continue after error from %(name)s: %(error)s"
-msgstr "Continuar después de error desde %(name)s: %(error)s"
-
-#, python-format
-msgid "Could not connect slave host: %s "
-msgstr "No se ha podido conectar con el host esclavo: %s"
-
-#, python-format
-msgid "Could not connect to XenAPI: %s"
-msgstr "No se puede conectar a XenAPI: %s"
-
-#, python-format
-msgid "Could not get CPU Util for %(id)s: %(e)s"
-msgstr "No se ha podido obtener CPU Util para %(id)s: %(e)s"
-
-#, python-format
-msgid "Could not get Memory Usage for %(id)s: %(e)s"
-msgstr "No se ha podido obtener el uso de memoria para %(id)s: %(e)s"
-
-#, python-format
-msgid "Could not get VM %s CPU Utilization"
-msgstr "No se puede obtener la utilización de CPU de VM %s"
-
-#, python-format
-msgid "Couldn't obtain IP address of instance %s"
-msgstr "No se ha podido obtener la dirección IP de la instancia %s"
-
-#, python-format
-msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
-msgstr "Descartando la notificación %(type)s (uuid:%(msgid)s)"
-
-#, python-format
-msgid ""
-"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
-"[Error Code %(error_code)s] %(ex)s"
-msgstr ""
-"Error de libvirt al buscar la instancia <name=%(name)s, id=%(id)s>: [Código "
-"de error %(error_code)s] %(ex)s"
-
-#, python-format
-msgid "Error parsing HTTP response: %s"
-msgstr "Error analizándo respuesta HTTP: %s."
-
-msgid "Error stopping pollster."
-msgstr "Error al detener el pollster."
-
-msgid "Event"
-msgstr "Suceso"
-
-msgid "Expression evaluated to a NaN value!"
-msgstr "La expresión se ha evaluado en un valor NaN."
-
-#, python-format
-msgid "Failed to import extension for %(name)s: %(error)s"
-msgstr "No se ha podido importar la extensión para %(name)s: %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
-"is SHUTOFF."
-msgstr ""
-"No se han podido analizar los datos de la instancia <name=%(name)s, id=%(id)s>, el estado del dominio es SHUTOFF."
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
-"libvirt: %(error)s"
-msgstr ""
-"No se ha podido analizar el uso de memoria de %(instance_uuid)s, no se puede "
-"obtener información de libvirt: %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
-"not get info from libvirt."
-msgstr "" -"No se ha podido analizar el uso de memoria de la instancia , no se puede obtener información de libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "No se ha podido cargar ningún manejador de notificación para %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "No se ha podido analizar el valor de indicación de fecha y hora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "No se han podido publicar los puntos de datos %d, descartándolos" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "No se han podido publicar los puntos de datos %d, póngalos en cola" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expresión de filtro no válida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Ignorando la instancia %(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Ignorando la instancia %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Se ignorará el equilibrador de carga %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Se ignorará la agrupación %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintaxis de YAML no válida en archivo de definiciones %(file)s en la línea: " -"%(line)s, columna: %(column)s." - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Periodo no válido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Tipo de rasgo no válido '%(type)s' para el rasgo %(trait)s" - -msgid "Limit must be positive" -msgstr "El límite debe ser positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "" -"Se ha devuelto más de un suceso con el %s del controlador de almacenamiento" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Se han encontrado varias VM %s en XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"Debe especificar el url_conexión y la contraseña_conexión para utilizar" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "No hay ningún plug-in denominado %(plugin)s disponible para %(name)s" - -msgid "Node Manager init failed" -msgstr "El inicio de Gestor de nodos ha fallado" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "No está autorizado para acceder a %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "La API OpenDaylitght ha devuelto %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "La API Opencontrail ha devuelto %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"El operador %(operator)s no se admite. Solo hay disponible el operador de " -"igualdad para el campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"El operador %(operator)s no está admitido. 
Los operadores admitidos son: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expresión de ordenar por no válida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Error de análisis en especificación de JSONPath '%(jsonpath)s' para " -"%(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "El período debe ser positivo." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"Interconexión %(pipeline)s: %(status)s tras el error de la aplicación de " -"publicación %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "" -"Interconexión %(pipeline)s: Continúe tras el error de la aplicación de " -"publicación %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Interconexión %(pipeline)s: Error al vaciar el transformador %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Interconexión %(pipeline)s: Salga tras error del transformador %(trans)s " -"para %(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "" -"Se ha especificado un plug-in, pero no se ha proporcionado ningún nombre de " -"plug-in para %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "¡El sensor de sondeo %(mtr)s ha fallado %(cnt)s veces!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "El sondeo %(name)s ha fallado %(cnt)s veces." - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "¡El Pollster para %s está inhabilitado!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "¡Impedir pollster %(name)s para el origen de sondeo %(source)s ahora!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Se supera la longitud máxima de aplicación de publicación local_queue, " -"descartando los ejemplos más antiguos %d" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "" -"No se conoce la política de publicación (%s) forzar para tomar el valor " -"predeterminado" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "La API de RGW AdminOps ha devuelto %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"La solicitud no ha podido conectar con OpenDaylight con la API REST " -"NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo necesario %s no especificado" - -msgid "Resource" -msgstr "Recurso" - -msgid "Sample" -msgstr "Muestra" - -msgid "Samples should be included in request body" -msgstr "Los ejemplos se deben incluir en el cuerpo de la solicitud" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Omitir la extensión de carga para %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "La serie %s no es una hora iso válida" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"El archivo Yaml que define la correlación entre los ejemplos y recursos/" -"métricas gnocchi" - -#, python-format -msgid "" -"The data type %(type)s is not supported. 
The supported data type list is: " -"%(supported)s" -msgstr "" -"El tipo de datos %(type)s no es compatible. La lista de tipo de datos " -"admitido es: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "El campo 'campos' es obligatorio para %s" - -msgid "The path for the file publisher is required" -msgstr "" -"La vía de acceso para la aplicación de publicación de archivos es necesaria" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: no se pueden decodificar los datos enviados por %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: no se puede almacenar el medidor" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "No se ha podido conectar con el servidor de base de datos: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"No se ha podido convertir el valor %(value)s al tipo de datos esperado " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "No se pueden descubrir recursos: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "No se puede evaluar la expresión %(expr)s: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "No se puede cargar la aplicación de publicación %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "No se puede cargar el inspector de hipervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"No se ha podido volver a conectar con la mongodb primaria después de " -"%(retries)d intentos. Se va a abandonar." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"No se ha podido volver a conectar con la mongodb primaria: %(errmsg)s. Se " -"volverá a intentar en %(retry_interval)d segundos." - -msgid "Unable to send sample over UDP" -msgstr "No se ha podido enviar una muestra sobre UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Excepción inesperada al convertir %(value)s al tipo de dato esperado " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Extensión de descubrimiento desconocida: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Tipo de metadatos desconocido. La clave (%s) no se podrá consultar." 
-
-#, python-format
-msgid ""
-"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en el equilibrador de carga "
-"%(id)s, se omitirá el ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en fw %(id)s, se omitirá el "
-"ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en el escucha %(id)s, se "
-"omitirá el ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en el miembro %(id)s, se "
-"omitirá el ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en la agrupación %(id)s, se "
-"omitirá el ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en vip %(id)s, se omitirá el "
-"ejemplo"
-
-#, python-format
-msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
-msgstr ""
-"Se ha recibido un estado desconocido %(stat)s en vpn %(id)s, se omitirá el "
-"ejemplo"
-
-#, python-format
-msgid "VM %s not found in VMware vSphere"
-msgstr "VM %s no se ha encontrado en VMware vSphere"
-
-#, python-format
-msgid "VM %s not found in XenServer"
-msgstr "No se han encontrado VM %s en XenServer"
-
-msgid "Wrong sensor type"
-msgstr "Tipo de sensor incorrecto"
-
-msgid "XenAPI not installed"
-msgstr "XenAPI no está instalado"
-
-#, python-format
-msgid "YAML error reading Definitions file %(file)s"
-msgstr "Error de YAML al leer el archivo de definiciones %(file)s"
-
-#, python-format
-msgid "could not get CPU time for %(id)s: %(e)s"
-msgstr "no se ha podido obtener tiempo de CPU para %(id)s: %(e)s"
-
-msgid "direct option cannot be true when Gnocchi is enabled."
-msgstr ""
-"la opción directo no puede estar definida como true cuando Gnocchi esté "
-"habilitado."
-
-#, python-format
-msgid "dropping out of time order sample: %s"
-msgstr "saliendo del ejemplo de orden de tiempo: %s"
-
-#, python-format
-msgid "dropping sample with no predecessor: %s"
-msgstr "eliminando la muestra sin predecesor: %s"
-
-msgid "ipmitool output length mismatch"
-msgstr "la longitud de salida de ipmitool no coincide"
-
-msgid "max_bytes and backup_count should be numbers."
-msgstr "max_bytes y backup_count deben ser números."
-
-msgid "parse IPMI sensor data failed,No data retrieved from given input"
-msgstr ""
-"ha fallado el análisis de datos de sensor IPMI,no se ha recuperado ningún "
-"dato de la entrada"
-
-msgid "parse IPMI sensor data failed,unknown sensor type"
-msgstr ""
-"ha fallado el análisis de datos de sensor IPMI,tipo de sensor desconocido"
-
-msgid "running ipmitool failure"
-msgstr "fallo de ejecución de ipmitool"
diff --git a/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po b/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po
deleted file mode 100644
index deff1930..00000000
--- a/ceilometer/locale/fr/LC_MESSAGES/ceilometer.po
+++ /dev/null
@@ -1,516 +0,0 @@
-# Translations template for ceilometer.
-# Copyright (C) 2015 ORGANIZATION
-# This file is distributed under the same license as the ceilometer project.
-#
-# Translators:
-# Corinne Verheyde , 2013
-# CHABERT Loic , 2013
-# Christophe kryskool , 2013
-# Corinne Verheyde , 2013-2014
-# EVEILLARD , 2013-2014
-# Francesco Vollero , 2015
-# Jonathan Dupart , 2014
-# CHABERT Loic , 2013
-# Maxime COQUEREL , 2014
-# Nick Barcet , 2013
-# Nick Barcet , 2013
-# Andrew Melim , 2014
-# Patrice LACHANCE , 2013
-# Patrice LACHANCE , 2013
-# Rémi Le Trocquer , 2014
-# EVEILLARD , 2013
-# Corinne Verheyde , 2013
-# Corinne Verheyde , 2013
-# Andreas Jaeger , 2016. #zanata
-# Angelique Pillal , 2016. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: ceilometer 6.0.1.dev170\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2016-06-07 17:37+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-03-29 08:24+0000\n"
-"Last-Translator: Angelique Pillal \n"
-"Language: fr\n"
-"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.3\n"
-"Language-Team: French\n"
-
-#, python-format
-msgid "%(entity)s %(id)s Not Found"
-msgstr "%(entity)s %(id)s n'a pas été trouvé"
-
-#, python-format
-msgid "Arithmetic transformer must use at least one meter in expression '%s'"
-msgstr ""
-"Le transformateur arithmétique doit utiliser au moins un mètre dans "
-"l'expression '%s'"
-
-#, python-format
-msgid "Cannot create table %(table_name)s it already exists. Ignoring error"
-msgstr ""
-"Impossible de créer la table %(table_name)s car elle existe déjà. Erreur "
-"ignorée"
-
-#, python-format
-msgid "Continue after error from %(name)s: %(error)s"
-msgstr "Continue après l'erreur %(name)s: %(error)s "
-
-#, python-format
-msgid "Could not connect slave host: %s "
-msgstr "Impossible de se connecter à l'hôte slave: %s "
-
-#, python-format
-msgid "Could not connect to XenAPI: %s"
-msgstr "Connexion impossible XenAPI: %s"
-
-#, python-format
-msgid "Could not get CPU Util for %(id)s: %(e)s"
-msgstr "Ne peut pas recevoir l'utilisation CPU pour %(id)s: %(e)s"
-
-#, python-format
-msgid "Could not get Memory Usage for %(id)s: %(e)s"
-msgstr ""
-"Impossible de récupérer l'utilisation de la mémoire pour %(id)s : %(e)s"
-
-#, python-format
-msgid "Could not get VM %s CPU Utilization"
-msgstr "Impossible d'obtenir l'utilisation CPU de la VM %s"
-
-#, python-format
-msgid "Couldn't obtain IP address of instance %s"
-msgstr "Impossible d'obtenir l'adresse IP de l'instance %s"
-
-#, python-format
-msgid "Dropping Notification %(type)s (uuid:%(msgid)s)"
-msgstr "Suppression du %(type)s de notification (uuid:%(msgid)s)"
-
-#, python-format
-msgid ""
-"Error from libvirt while looking up instance <name=%(name)s, id=%(id)s>: "
-"[Error Code %(error_code)s] %(ex)s"
-msgstr ""
-"Erreur de libvirt lors de la recherche de l'instance <name=%(name)s, id=%(id)s>: [Code d'erreur %(error_code)s] %(ex)s"
-
-#, python-format
-msgid "Error parsing HTTP response: %s"
-msgstr "Erreur lors de l'analyse syntaxique de la réponse: %s"
-
-msgid "Error stopping pollster."
-msgstr "Erreur lors de l'arrêt du sondeur."
-
-msgid "Event"
-msgstr "Événement"
-
-msgid "Expression evaluated to a NaN value!"
-msgstr "Expression évaluée avec une valeur not-a-number !"
-
-#, python-format
-msgid "Failed to import extension for %(name)s: %(error)s"
-msgstr "Echec de l'importation de l'extension pour %(name)s: %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state "
-"is SHUTOFF."
-msgstr ""
-"Echec de l'inspection des données de l'instance <name=%(name)s, id=%(id)s>. "
-"Le domaine est à l'état SHUTOFF (INTERRUPTION)."
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of %(instance_uuid)s, can not get info from "
-"libvirt: %(error)s"
-msgstr ""
-"Echec de l'inspection de l'utilisation de la mémoire de %(instance_uuid)s. "
-"Impossible d'obtenir des informations de libvirt : %(error)s"
-
-#, python-format
-msgid ""
-"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can "
-"not get info from libvirt."
-msgstr ""
-"Echec de l'inspection de l'utilisation de la mémoire de l'instance <name=%(name)s, id=%(id)s>. Impossible d'obtenir des informations de libvirt."
-
-#, python-format
-msgid "Failed to load any notification handlers for %s"
-msgstr "Échec du chargement de tous les gestionnaires de notification pour %s"
-
-#, python-format
-msgid "Failed to parse the timestamp value %s"
-msgstr "Echec de l'analyse syntaxique de la valeur d'horodatage %s"
-
-#, python-format
-msgid "Failed to publish %d datapoints, dropping them"
-msgstr "Echec de la publication des points de données %d. Suppression en cours"
-
-#, python-format
-msgid "Failed to publish %d datapoints, queue them"
-msgstr ""
-"Echec de la publication des points de données %d. Mettez-les en file "
-"d'attente"
-
-#, python-format
-msgid "Filter expression not valid: %s"
-msgstr "Filtre de l'expression n'est pas valide: %s"
-
-#, python-format
-msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s"
-msgstr "L'instance %(name)s est ignorée (%(instance_id)s) : %(error)s"
-
-#, python-format
-msgid "Ignoring instance %(name)s: %(error)s"
-msgstr "instance %(name)s: %(error)s ignoré"
-
-#, python-format
-msgid "Ignoring loadbalancer %(loadbalancer_id)s"
-msgstr "Loadbalancer %(loadbalancer_id)s ignoré"
-
-#, python-format
-msgid "Ignoring pool %(pool_id)s"
-msgstr "Pool %(pool_id)s ignoré"
-
-#, python-format
-msgid ""
-"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: "
-"%(column)s."
-msgstr ""
-"Syntaxe YAML non valide dans le fichier de définitions %(file)s à la ligne : "
-"%(line)s, colonne : %(column)s."
-
-#, python-format
-msgid "Invalid period %(period)s: %(err)s"
-msgstr "Période %(period)s non valide : %(err)s"
-
-#, python-format
-msgid "Invalid trait type '%(type)s' for trait %(trait)s"
-msgstr "Type de trait non valide '%(type)s' pour le trait %(trait)s"
-
-msgid "Limit must be positive"
-msgstr "La limite doit être positive"
-
-#, python-format
-msgid "More than one event with id %s returned from storage driver"
-msgstr ""
-"Plus d'un événement avec l'identificateur %s a été renvoyé à partir du "
-"pilote de stockage"
-
-#, python-format
-msgid "Multiple VM %s found in XenServer"
-msgstr "Plusieurs machines virtuelles %s trouvées dans XenServer"
-
-msgid "Must specify connection_url, and connection_password to use"
-msgstr "Il faut indiquer connection_url et connection_password pour utiliser"
-
-#, python-format
-msgid "No plugin named %(plugin)s available for %(name)s"
-msgstr "Aucun plugin nommé %(plugin)s n'est disponible pour %(name)s"
-
-msgid "Node Manager init failed"
-msgstr "Echec de l'initialisation du gestionnaire de noeud"
-
-#, python-format
-msgid "Not Authorized to access %(aspect)s %(id)s"
-msgstr "Non autorisé à accéder %(aspect)s %(id)s "
-
-#, python-format
-msgid "OpenDaylitght API returned %(status)s %(reason)s"
-msgstr "L'API OpenDaylight a renvoyé %(status)s %(reason)s"
-
-#, python-format
-msgid "Opencontrail API returned %(status)s %(reason)s"
-msgstr "L'API Opencontrail a renvoyé %(status)s %(reason)s"
-
-#, python-format
-msgid ""
-"Operator %(operator)s is not supported. Only equality operator is available "
-"for field %(field)s"
-msgstr ""
-"Opérateur %(operator)s non supporté. Seul l'opérateur égalité est disponible "
-"pour le champ %(field)s"
-
-#, python-format
-msgid ""
-"Operator %(operator)s is not supported. The supported operators are: "
-"%(supported)s"
-msgstr ""
-"L'opérateur %(operator)s n'est pas supporté. Les opérateurs supportés sont: "
-"%(supported)s"
-
-#, python-format
-msgid "Order-by expression not valid: %s"
-msgstr "L'expression de tri n'est pas valide : %s"
-
-#, python-format
-msgid ""
-"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s"
-msgstr ""
-"Erreur d'analyse dans la spécification JSONPath '%(jsonpath)s' pour "
-"%(name)s : %(err)s"
-
-msgid "Period must be positive."
-msgstr "La période doit être positive."
-
-#, python-format
-msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s"
-msgstr ""
-"Pipeline %(pipeline)s : statut %(status)s après erreur du diffuseur %(pub)s"
-
-#, python-format
-msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s"
-msgstr "Pipeline %(pipeline)s: Reprise après une erreur de l'éditeur %(pub)s"
-
-#, python-format
-msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s"
-msgstr "Pipeline %(pipeline)s: Erreur à la purge du transformateur %(trans)s"
-
-#, python-format
-msgid ""
-"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for "
-"%(smp)s"
-msgstr ""
-"Pipeline %(pipeline)s: Sortie après erreur du transformateur %(trans)s pour "
-"%(smp)s"
-
-#, python-format
-msgid "Plugin specified, but no plugin name supplied for %s"
-msgstr "Plugin spécifié, mais aucun nom de plugin n'est fourni pour %s"
-
-#, python-format
-msgid "Polling %(mtr)s sensor failed for %(cnt)s times!"
-msgstr "L'interrogation du capteur %(mtr)s a échoué %(cnt)s fois !"
-
-#, python-format
-msgid "Polling %(name)s failed for %(cnt)s times!"
-msgstr "Sondage de %(name)s %(cnt)s fois en échec!"
-
-#, python-format
-msgid "Pollster for %s is disabled!"
-msgstr "Le pollster pour %s est désactivé !"
-
-#, python-format
-msgid "Prevent pollster %(name)s for polling source %(source)s anymore!"
-msgstr "Empêcher le pollster %(name)s d'interroger la source %(source)s !"
-
-#, python-format
-msgid ""
-"Publisher max local_queue length is exceeded, dropping %d oldest samples"
-msgstr ""
-"La longueur maximale de local_queue du diffuseur est dépassée, suppression "
-"des %d échantillons les plus anciens"
-
-#, python-format
-msgid "Publishing policy is unknown (%s) force to default"
-msgstr "La politique de publication est inconnue (%s) forcé le défaut"
-
-#, python-format
-msgid "RGW AdminOps API returned %(status)s %(reason)s"
-msgstr "L'API AdminOps RGW a renvoyé %(status)s %(reason)s"
-
-msgid "Request failed to connect to OpenDaylight with NorthBound REST API"
-msgstr ""
-"La demande n'a pas réussi à se connecter à OpenDaylight avec l'API REST "
-"NorthBound"
-
-#, python-format
-msgid "Required field %s not specified"
-msgstr "Champ requis %s non spécifiée"
-
-msgid "Resource"
-msgstr "Ressource"
-
-msgid "Sample"
-msgstr "Echantillon"
-
-msgid "Samples should be included in request body"
-msgstr "Des exemples doivent être inclus dans le corps de demande"
-
-#, python-format
-msgid "Skip loading extension for %s"
-msgstr "Passer le chargement de l'extension pour %s"
-
-#, python-format
-msgid "String %s is not a valid isotime"
-msgstr "La chaine de caractère %s n'est pas valide isotime"
-
-msgid ""
-"The Yaml file that defines mapping between samples and gnocchi resources/"
-"metrics"
-msgstr ""
-"Fichier Yaml qui définit le mappage entre les exemples et les ressources "
-"gnocchi /les métriques"
-
-#, python-format
-msgid ""
-"The data type %(type)s is not supported. The supported data type list is: "
-"%(supported)s"
-msgstr ""
-"Le type de données %(type)s n'est pas supporté. Les types de données "
-"supportés sont: %(supported)s"
-
-#, python-format
-msgid "The field 'fields' is required for %s"
-msgstr "Le champ 'fields' est requis pour %s"
-
-msgid "The path for the file publisher is required"
-msgstr "Le chemin du éditeur de fichier est obligatoire "
-
-#, python-format
-msgid "UDP: Cannot decode data sent by %s"
-msgstr "UDP: Impossible de décoder les données envoyées par %s"
-
-msgid "UDP: Unable to store meter"
-msgstr "UDP: Impossible de stocker les mesures"
-
-#, python-format
-msgid "Unable to connect to the database server: %(errmsg)s."
-msgstr "Impossible de se connecter au serveur de base de données : %(errmsg)s."
-
-#, python-format
-msgid ""
-"Unable to convert the value %(value)s to the expected data type %(type)s."
-msgstr ""
-"Impossible de convertir la valeur %(value)s vers le type de données attendu "
-"%(type)s."
-
-#, python-format
-msgid "Unable to discover resources: %s"
-msgstr "Impossible de découvrir les ressources: %s"
-
-#, python-format
-msgid "Unable to evaluate expression %(expr)s: %(exc)s"
-msgstr "Impossible d'évaluer l'expression %(expr)s : %(exc)s"
-
-#, python-format
-msgid "Unable to load publisher %s"
-msgstr "Impossible de charger l'éditeur %s "
-
-#, python-format
-msgid "Unable to load the hypervisor inspector: %s"
-msgstr "Impossible de télécharger l'inspecteur hypervisor: %s"
-
-#, python-format
-msgid ""
-"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving "
-"up."
-msgstr ""
-"Impossible de se reconnecter au serveur mongodb principal après %(retries)d "
-"tentatives. Abandon."
-
-#, python-format
-msgid ""
-"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in "
-"%(retry_interval)d seconds."
-msgstr ""
-"Impossible de se reconnecter au serveur mongodb principal : %(errmsg)s. "
-"Nouvelle tentative dans %(retry_interval)d secondes."
-
-msgid "Unable to send sample over UDP"
-msgstr "Impossible d'envoyer l'échantillon en UDP"
-
-#, python-format
-msgid ""
-"Unexpected exception converting %(value)s to the expected data type %(type)s."
-msgstr ""
-"Exception inattendue lors de la conversion de %(value)s dans le type de "
-"donnée attendue %(type)s."
-
-#, python-format
-msgid "Unknown discovery extension: %s"
-msgstr "Découverte d'une extension inconnue: %s"
-
-#, python-format
-msgid "Unknown metadata type. Key (%s) will not be queryable."
-msgstr "Type de métadonnées inconnu, la clé (%s) n'est pas requêtable"
-
-#, python-format
-msgid ""
-"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample"
-msgstr ""
-"Statut %(stat)s inconnu reçu sur le Load Balancer %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample"
-msgstr "Etat %(stat)s inconnu reçu sur le pare-feu %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample"
-msgstr "Etat %(stat)s inconnu reçu sur le listener %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on member %(id)s, skipping sample"
-msgstr "Etat %(stat)s inconnu reçu sur le membre %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample"
-msgstr "Etat %(stat)s inconnu reçu sur le pool %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample"
-msgstr ""
-"Etat %(stat)s inconnu reçu sur l'IP virtuelle %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample"
-msgstr "Etat %(stat)s inconnu reçu sur le vpn %(id)s, échantillon ignoré"
-
-#, python-format
-msgid "VM %s not found in VMware vSphere"
-msgstr "La machine virtuelle %s est introuvable dans VMware vSphere"
-
-#, python-format
-msgid "VM %s not found in XenServer"
-msgstr "VM %s non trouvé dans XenServer"
-
-msgid "Wrong sensor type"
-msgstr "Type de détecteur incorrect"
-
-msgid "XenAPI not installed"
-msgstr "XenAPI n'est pas installé"
-
-#, python-format
-msgid "YAML error reading Definitions file %(file)s"
-msgstr "Erreur YAML lors de la lecture du fichier de définitions %(file)s"
-
-msgid "alarms URLs is unavailable when Aodh is disabled or unavailable."
-msgstr ""
-"Les URL d'alarmes ne sont pas disponibles lorsque Aodh est désactivé ou non "
-"disponible."
-
-#, python-format
-msgid "could not get CPU time for %(id)s: %(e)s"
-msgstr "impossible d'obtenir le temps UC pour %(id)s : %(e)s"
-
-msgid "direct option cannot be true when Gnocchi is enabled."
-msgstr "L'option directe ne peut pas être à vrai si Gnocchi est activé."
-
-#, python-format
-msgid "dropping out of time order sample: %s"
-msgstr "suppression de l'exemple de classement dans le temps : %s"
-
-#, python-format
-msgid "dropping sample with no predecessor: %s"
-msgstr "abandon de l'échantillon sans prédécesseur: %s"
-
-msgid "ipmitool output length mismatch"
-msgstr "Non-concordance de longueur de la sortie ipmitool"
-
-msgid "max_bytes and backup_count should be numbers."
-msgstr "max_bytes et backup_count doivent etre des chiffres." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"Echec de l'analyse des données du détecteur IPMI, aucune donnée extraite à " -"partir de l'entrée fournie" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "" -"Echec de l'analyse des données du détecteur IPMI, type de détecteur inconnu" - -msgid "running ipmitool failure" -msgstr "Echec d'exécution d'ipmitool" diff --git a/ceilometer/locale/it/LC_MESSAGES/ceilometer.po b/ceilometer/locale/it/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 6dba5a8e..00000000 --- a/ceilometer/locale/it/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,505 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Stefano Maffulli , 2013 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-12 02:04+0000\n" -"Last-Translator: Alessandra \n" -"Language: it\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Italian\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s non trovato" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Il trasformatore aritmetico deve utilizzare almeno un contatore " -"nell'espressione '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Impossibile creare la tabella %(table_name)s la tabella già esiste. " -"Ignorare l'errore" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continua dopo errore da %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Impossibile connettersi all'host slave: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Impossibile connettersi a XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Impossibile ricevere CPU Util per %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "Impossibile ricevere l'Uso della Memoria per %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "Impossibile conoscere l'utilizzo CPU della VM %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Impossibile ottenere l'indirizzo IP dell'istanza %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Eliminazione della notifica %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Errore da libvirt durante la ricerca dell'istanza : [Codice di errore %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Errore durante l'analisi della risposta HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Errore durante l'arresto del sondaggio. " - -msgid "Event" -msgstr "Evento" - -msgid "Expression evaluated to a NaN value!" 
-msgstr "Espressione valutata a un valore NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Impossibile importare l'estensione per %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Impossibile ispezionare i dati dell'istanza , " -"stato dominio SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Impossibile ispezionare l'utilizzo della memoria da parte di " -"%(instance_uuid)s, impossibile ottenere informazioni da libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Impossibile ispezionare l'utilizzo della memoria da parte dell'istanza , impossibile ottenere informazioni da libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Impossibile caricare eventuali gestori di notifica per %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Impossibile analizzare il valore data/ora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Impossibile pubblicare %d datapoint, eliminati" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Impossibile pubblicare %d datapoint, inseriti in coda" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Espressione del filtro non valida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "L'istanza %(name)s (%(instance_id)s) viene ignorata: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Si sta ignorando l'istanza %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Ignora loadbalancer %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Ignora pool %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintassi YAML non valida nel file delle definizioni %(file)s alla riga: " -"%(line)s, colonna: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Periodo non valido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "" -"Tipo di caratteristica non valido '%(type)s' per la caratteristica %(trait)s" - -msgid "Limit must be positive" -msgstr "Il limite deve essere un positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "Più di un evento con id %s restituito dal driver di archiviazione" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Più VM %s trovate in XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"È necessario specificare connection_url e connection_password da utilizzare" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Nessun plug-in con nome %(plugin)s disponibile per %(name)s" - -msgid "Node Manager init failed" -msgstr "Inizializzazione gestore nodi non riuscita" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Non autorizzato ad accedere %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "L'API OpenDaylitght ha restituito %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "L'API Opencontrail ha restituito %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Operatore %(operator)s non è supportato. Solo gli operatori di uguaglianza " -"sono disponibili per il campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Operatore %(operator)s non è supportato. Gli operatori supportati sono: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "L'espressione ordina per non è valida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Errore di analisi nella specifica JSONPath '%(jsonpath)s' per %(name)s: " -"%(err)s" - -msgid "Period must be positive." -msgstr "Il periodo deve essere positivo" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s dopo errore da publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Continuare dopo errore da publisher %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"Pipeline %(pipeline)s: errore durante lo scaricamento del trasformatore " -"%(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Uscita dopo errore del trasformatore %(trans)s per " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in specificato, ma nessun nome di plug-in fornito per %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "Polling del sensore %(mtr)s non riuscito per %(cnt)s volte!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Polling di %(name)s non riuscito per %(cnt)s volte!" 
- -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Pollster per %s disabilitato!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Impedire al pollster %(name)s di eseguire il polling dell'origine %(source)s." - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"La lunghezza local_queue massima del publisher è stata superata, " -"eliminazione di esempi %d meno recenti" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "" -"La politica di pubblicazione è sconosciuta (%s), applicazione del valore " -"predefinito" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "L'API RGW AdminOps ha restituito %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "" -"Richiesta di collegamento a OpenDaylight con API NorthBound REST non riuscita" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo richiesto %s non specificato" - -msgid "Resource" -msgstr "Risorsa" - -msgid "Sample" -msgstr "Esempio" - -msgid "Samples should be included in request body" -msgstr "I campioni devono essere inclusi nel corpo della richiesta " - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Ignora caricamento dell'estensione per %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "La stringa %s non è un orario standard (isotime) valido" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Il file Yaml che definisce l'associazione tra i campioni e le risorse " -"gnocchi/metriche" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Il tipo di dati %(type)s non è supportato. L'elenco dei tipi di dati " -"supportati è: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Il campo 'fields' è obbligatorio per %s" - -msgid "The path for the file publisher is required" -msgstr "Il percorso per il publisher di file è obbligatorio" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: impossibile decodificare i dati inviati da %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: impossibile memorizzare il contatore" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Impossibile connettersi al server di database: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Impossibile convertire il valore %(value)s nel tipo di dati previsto " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Impossibile rilevare le risorse: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Impossibile valutare l'espressione %(expr)s: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Impossibile caricare il publisher %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Impossibile caricare il programma di controllo hypervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Impossibile riconnettersi al mongodb primario dopo %(retries)d tentativi. " -"L'operazione viene interrotta." 
- -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Impossibile connettersi al mongodb primario: %(errmsg)s. Prossimo tentativo " -"tra %(retry_interval)d secondi." - -msgid "Unable to send sample over UDP" -msgstr "Impossibile inviare l'esempio su UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Eccezione non prevista durante la conversione di %(value)s per il tipo di " -"dati previsto %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Estensione di rilevamento sconosciuta: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "" -"Tipo di metadati sconosciuto. La chiave (%s) non potrà essere sottoposta a " -"query." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su bilanciatore del carico %(id)s, " -"ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "Stato non conosciuto %(stat)s ricevuto su fw %(id)s,ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su listener %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su membro %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto sul pool %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su vip %(id)s, ignorare l'esempio" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Stato non conosciuto %(stat)s ricevuto su vpn %(id)s, ignorare l'esempio" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s non trovata in VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s non trovata in XenServer" - -msgid "Wrong sensor type" -msgstr "Tipo di sensore errato" - -msgid "XenAPI not installed" -msgstr "XenAPI non installato" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Errore YAML durante la lettura del file definizioni %(file)s" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"alarm URLs non è disponibile con Aodh perché disabilitato oppure non " -"disponibile " - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "impossibile ricevere l'ora CPU per %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "L'opzione direct non può essere true quando Gnocchi è abilitato." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "rilascio campione ordinamento fuori tempo: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "eliminazione in corso dell'esempio senza predecessore: %s" - -msgid "ipmitool output length mismatch" -msgstr "mancata corrispondenza della lunghezza dell'output ipmitool" - -msgid "max_bytes and backup_count should be numbers." 
-msgstr "max_bytes e backup_count devono essere numeri." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"analisi dei dati del sensore IPMI non riuscita, nessun dato recuperato " -"dall'input fornito" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "" -"analisi dei dati del sensore IPMI non riuscita, tipo di sensore sconosciuto" - -msgid "running ipmitool failure" -msgstr "errore nell'esecuzione ipmitool" diff --git a/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 1c44ae93..00000000 --- a/ceilometer/locale/ja/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,506 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Tomoyuki KATO , 2013 -# Andreas Jaeger , 2016. #zanata -# 笹原 昌美 , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-16 11:33+0000\n" -"Last-Translator: 笹原 昌美 \n" -"Language: ja\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Japanese\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s が見つかりません" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"演算変換プログラムは、式 '%s' で少なくとも 1 つのメーターを使用する必要があり" -"ます" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"テーブル %(table_name)s は既に存在するため、作成できません。エラーを無視しま" -"す" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "%(name)s からのエラーの後で続行します: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "スレーブホストに接続できませんでした: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "XenAPI に接続できませんでした: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "%(id)s の CPU 使用率を取得できませんでした: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s のメモリー使用量を取得できませんでした: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "VM %s のCPU 使用率を取得できませんでした" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "インスタンス %s の IP アドレスを取得できませんでした" - -#, fuzzy, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "通知 %(type)s を除去しています (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"インスタンス の検索中に libvirt でエラーが発生しま" -"した: [エラーコード %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "HTTP 応答を解析している際にエラーが発生しました: %s" - -msgid "Error stopping pollster." -msgstr "pollster 停止エラー。" - -msgid "Event" -msgstr "イベント" - -msgid "Expression evaluated to a NaN value!" -msgstr "式が NaN 値に評価されました。" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "%(name)s の拡張機能のインポートに失敗しました: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." 
-msgstr "" -"インスタンス のデータを検査できませんでした。ドメ" -"イン状態は SHUTOFF です。" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"%(instance_uuid)s のメモリー使用状況を検査できませんでした。libvirt から情報" -"を取得できません: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"インスタンス のメモリー使用状況を検査できませんで" -"した。libvirt から情報を取得できません。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "%s の通知ハンドラーをロードできませんでした" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "タイムスタンプ値 %s を解析できませんでした" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "%d データポイントの公開に失敗しました。これらは廃棄されます" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "%d データポイントの公開に失敗しました。これらをキューに入れてください" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "フィルター式が無効です: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "インスタンス %(name)s (%(instance_id)s) を無視しています: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "インスタンス %(name)s を無視しています: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "ロードバランサー %(loadbalancer_id)s を無視しています" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "プール %(pool_id)s を無視しています" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"%(line)s 行目の %(column)s 列で定義ファイル %(file)s の YAML 構文 が無効で" -"す。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "無効な期間 %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特性 %(trait)s の特性タイプ '%(type)s' が無効です" - -msgid "Limit must be positive" -msgstr "上限は正の値でなければなりません" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "ストレージドライバーから id %s のイベントが複数返されました" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "複数の VM %s が XenServer に見つかりました" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "" -"connection_url と、使用する connection_password を指定する必要があります" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "%(name)s に使用できる %(plugin)s という名前のプラグインがありません" - -msgid "Node Manager init failed" -msgstr "ノードマネージャーの初期化に失敗しました" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s にアクセスする権限がありません" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylitght API から %(status)s %(reason)s が返されました" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API から %(status)s %(reason)s が返されました" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"演算子 %(operator)s はサポートされていません。フィールド %(field)s で使用でき" -"るのは等価演算子のみです。" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. 
The supported operators are: " -"%(supported)s" -msgstr "" -"演算子 %(operator)s はサポートされていません。サポートされている演算子は " -"%(supported)s です。" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "order-by 式が無効です: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"%(name)s に関する JSONPath の指定 '%(jsonpath)s' のエラーを解析します: " -"%(err)s" - -msgid "Period must be positive." -msgstr "期間は正の数でなければなりません。" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの発生後の " -"%(status)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "" -"パイプライン %(pipeline)s: パブリッシャー %(pub)s からのエラーの後で続行しま" -"す" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "" -"パイプライン %(pipeline)s: 変換プログラム %(trans)s をフラッシュするときにエ" -"ラーが発生しました" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"パイプライン %(pipeline)s: %(smp)s について変換プログラム %(trans)s からエ" -"ラーが発生した後に終了します" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "プラグインが指定されていますが、%s にプラグイン名が提供されていません" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "センサー %(mtr)s のポーリングが %(cnt)s 回失敗しました" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "ポーリング %(name)s が %(cnt)s 回失敗しました" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s の pollster が無効になっています" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"pollster %(name)s がこれ以上ソース %(source)s をポーリングしないようにしてく" -"ださい" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"パブリッシャー local_queue 最大長を超えました。古い方から %d 個のサンプルを除" -"去します" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "パブリッシュポリシーが不明です (%s)。強制的にデフォルトに設定されます" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API から %(status)s %(reason)s が返されました" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "NorthBound REST API を使用した OpenDaylight への接続要求が失敗しました" - -#, python-format -msgid "Required field %s not specified" -msgstr "必須フィールド %s が指定されていません" - -msgid "Resource" -msgstr "リソース" - -msgid "Sample" -msgstr "サンプル" - -msgid "Samples should be included in request body" -msgstr "サンプルは要求本文に含まれる必要があります" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "%s の拡張機能のロードをスキップします" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "文字列 %s は無効な isotime です" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"サンプルと gnocchi のリソース/メトリクス間のマッピングを定義する Yaml ファイ" -"ル" - -#, python-format -msgid "" -"The data type %(type)s is not supported. 
The supported data type list is: " -"%(supported)s" -msgstr "" -"データ型 %(type)s はサポートされていません。サポートされているデータ型のリス" -"ト: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s にはフィールド 'fields' が必要です" - -msgid "The path for the file publisher is required" -msgstr "ファイルパブリッシャーのパスが必要です" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: %s から送信されたデータをデコードできません" - -msgid "UDP: Unable to store meter" -msgstr "UDP: メーターを保存できません" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "データベースサーバーに接続できません: %(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "値 %(value)s を、想定されるデータ型 %(type)s に変換できません。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "リソースを検出できません: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "式 %(expr)s を評価できません: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "パブリッシャー %s をロードできません" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "ハイパーバイザーインスペクターをロードできません: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"%(retries)d 回の再試行後、1 次 mongodb に再接続できません。中止します。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"プライマリー mongodb に再接続できません: %(errmsg)s。%(retry_interval)d 秒以" -"内に再試行します。" - -msgid "Unable to send sample over UDP" -msgstr "UDP 経由でサンプルを送信できません" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s を想定されるデータ型 %(type)s に変換する際に、想定しない例外が発生" -"しました。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "不明なディスカバリーエクステンション: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." 
-msgstr "不明なメタデータ種別です。キー (%s) は照会不可になります。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"ロードバランサー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" -"キップします" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"ファイアウォール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをス" -"キップします" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"リスナー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" -"す" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"メンバー %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップしま" -"す" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"プール %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"仮想 IP %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"vpn %(id)s で不明な状態 %(stat)s を受信しました。サンプルをスキップします" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VMware vSphere で VM %s が見つかりません" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s が XenServer に見つかりません" - -msgid "Wrong sensor type" -msgstr "センサー種別が正しくありません" - -msgid "XenAPI not installed" -msgstr "XenAPI がインストールされていません" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "定義ファイル %(file)s での読み取りの YAML エラー" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Aodh が無効化されるか使用不可の場合、URL が使用できないことを警告します。" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "%(id)s の CPU 時間を取得できませんでした: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "" -"Gnocchi を有効化した場合は、direct オプションを True に設定することはできませ" -"ん。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "期限切れのオーダーサンプルを廃棄しています: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "サンプル (先行なし) を廃棄しています: %s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 出力の長さが一致しません" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes と backup_count は数値でなければなりません。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"IPMI センサーデータの解析に失敗しました。指定された入力からデータが取得されま" -"せんでした" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "IPMI センサーデータの解析に失敗しました。不明なセンサー種別です。" - -msgid "running ipmitool failure" -msgstr "ipmitool の実行に失敗しました" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po deleted file mode 100644 index 0d0ad486..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-error.po +++ /dev/null @@ -1,135 +0,0 @@ -# Andreas Jaeger , 2016. #zanata -# Sungjin Kang , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:38+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "Cannot load inspector %(name)s: %(err)s" -msgstr "%(name)s 검사기를 로드할 수 없음: %(err)s" - -#, python-format -msgid "Could not get Resident Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s의 상주 메모리 사용을 가져올 수 없음 : %(e)s" - -#, python-format -msgid "Dispatcher failed to handle the %s, requeue it." -msgstr "디스패처에서 %s을(를) 처리하지 못하여 다시 대기열에 둡니다." - -msgid "Error connecting to coordination backend." -msgstr "조정 백엔드를 연결하는 중에 오류가 발생했습니다." - -msgid "Error getting group membership info from coordination backend." -msgstr "조정 백엔드에서 그룹 멤버십 정보를 가져오는 중에 오류가 발생했습니다." - -#, python-format -msgid "Error joining partitioning group %s, re-trying" -msgstr "" -"파티션 지정 그룹 %s을(를) 결합하는 중에 오류가 발생하여, 다시 시도 중입니다." - -#, python-format -msgid "Error processing event and it will be dropped: %s" -msgstr "이벤트 처리 중 오류가 발생하므로 삭제됨: %s" - -msgid "Error sending a heartbeat to coordination backend." -msgstr "하트비트를 조정 백엔드에서 보내는 중에 오류가 발생했습니다." - -msgid "Fail to process a notification" -msgstr "알림을 처리하는 데 실패" - -msgid "Fail to process notification" -msgstr "알림을 처리하는 데 실패" - -msgid "Failed to connect to Gnocchi." -msgstr "Gnocchi에 연결하지 못했습니다." - -#, python-format -msgid "Failed to connect to Kafka service: %s" -msgstr "Kafka 서비스에 연결하는 데 실패: %s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s re-try later: %(err)s" -msgstr "DB에 연결하는 데 실패, %(purpose)s 용도를 나중에 다시 시도: %(err)s" - -#, python-format -msgid "Failed to connect to db, purpose %(purpose)s retry later: %(err)s" -msgstr "DB에 연결하는 데 실패, %(purpose)s 용도를 나중에 다시 시도: %(err)s" - -#, python-format -msgid "Failed to load resource due to error %s" -msgstr "%s 오류로 인해 자원을 로드하는 데 실패" - -#, python-format -msgid "Failed to record event: %s" -msgstr "이벤트를 기록하는 데 실패: %s" - -msgid "Failed to retry to send sample data with max_retry times" -msgstr "샘플 데이터를 max_retry 횟수만큼 보내는 데 실패" - -msgid "" -"Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: Current agent is " -"not part of group and cannot take tasks" -msgstr "" -"그룹 ID: %{group_id}s, 멤버: %{members}s, 사용자: %{me}s: 현재 에이전트가 그" -"룹의 일부가 아니므로 작업을 수행할 수 없음" - -#, python-format -msgid "Invalid type %s specified" -msgstr "올바르지 않은 유형 %s이(가) 지정됨" - -#, python-format -msgid "Missing field %s" -msgstr "%s 필드 누락" - -msgid "Passed resource dict must contain keys resource_id and resource_url." -msgstr "전달된 자원 dict에 키 resource_id와 resource_url이 포함되어야 합니다." - -#, python-format -msgid "Required field %(field)s should be a %(type)s" -msgstr "필수 필드 %(field)s은(는) %(type)s이어야 함" - -#, python-format -msgid "Required field %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -#, python-format -msgid "Required fields %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -#, python-format -msgid "Skip invalid resource %s" -msgstr "올바르지 않은 자원 %s 건너뛰기" - -#, python-format -msgid "Skipping %(name)s, keystone issue: %(exc)s" -msgstr "%(name)s 건너뛰기, keystone 문제: %(exc)s" - -msgid "Status Code: %{code}s. Failed todispatch event: %{event}s" -msgstr "상태 코드: %{code}s. 
이벤트를 디스패치하는 데 실패: %{event}s" - -#, python-format -msgid "Unable to load changed event pipeline: %s" -msgstr "변경된 이벤트 파이프라인을 로드할 수 없음: %s" - -#, python-format -msgid "Unable to load changed pipeline: %s" -msgstr "변경된 파이프라인을 로드할 수 없음: %s" - -#, python-format -msgid "Unrecognized type value %s" -msgstr "인식되지 않은 유형 값 %s" - -#, python-format -msgid "inspector call failed for %(ident)s host %(host)s: %(err)s" -msgstr "%(ident)s 호스트 %(host)s의 검사기 호출에 실패: %(err)s" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po deleted file mode 100644 index d3fe6a27..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-info.po +++ /dev/null @@ -1,128 +0,0 @@ -# Sungjin Kang , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev57\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-18 02:09+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:39+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#, python-format -msgid "%d events are removed from database" -msgstr "데이터베이스에서 %d 이벤트가 제거됨" - -#, python-format -msgid "%d samples removed from database" -msgstr "데이터베이스에서 %d 샘플이 제거됨" - -msgid "Configuration:" -msgstr "구성:" - -#, python-format -msgid "Connecting to %(db)s on %(nodelist)s" -msgstr "%(nodelist)s에서 %(db)s에 연결 중 " - -msgid "Coordination backend started successfully." -msgstr "조정 백엔드가 성공적으로 시작되었습니다." - -#, python-format -msgid "Definitions: %s" -msgstr "정의: %s" - -msgid "Detected change in pipeline configuration." -msgstr "파이프라인 구성의 변경을 발견했습니다." - -#, python-format -msgid "Dropping event data with TTL %d" -msgstr "TTL이 %d인 이벤트 데이터 삭제" - -#, python-format -msgid "Dropping metering data with TTL %d" -msgstr "TTL이 %d인 측정 데이터 삭제" - -#, python-format -msgid "Duplicate event detected, skipping it: %s" -msgstr "중복 이벤트가 발견되어 해당 이벤트를 건너뜀: %s" - -msgid "Expired residual resource and meter definition data" -msgstr "잔여 자원 및 측정 정의 데이터 만료됨" - -#, python-format -msgid "Index %s will be recreate." -msgstr "%s 인덱스가 다시 생성됩니다." - -#, python-format -msgid "Joined partitioning group %s" -msgstr "결합된 파티션 그룹 %s" - -#, python-format -msgid "Left partitioning group %s" -msgstr "남은 파티션 그룹 %s" - -#, python-format -msgid "No limit value provided, result set will be limited to %(limit)d." -msgstr "한계 값이 제공되지 않음, 결과 세트가 %(limit)d(으)로 제한됩니다." - -msgid "Nothing to clean, database event time to live is disabled" -msgstr "정리할 사항이 없음, 데이터베이스 이벤트 지속 시간(TTL)이 사용되지 않음" - -msgid "Nothing to clean, database metering time to live is disabled" -msgstr "정리할 사항이 없음, 데이터베이스 측정 지속 시간(TTL)이 사용되지 않음" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Setup transformer instance %(name)s with parameter " -"%(param)s" -msgstr "" -"파이프라인 %(pipeline)s: %(param)s 매개변수로 변환기 인스턴스 %(name)s 설정 " - -#, python-format -msgid "Pipeline config: %s" -msgstr "파이프라인 구성: %s" - -msgid "Pipeline configuration file has been updated." -msgstr "파이프라인 구성 파일이 업데이트되었습니다." - -#, python-format -msgid "Polling pollster %(poll)s in the context of %(src)s" -msgstr "%(src)s 컨텍스트의 의견조사자 %(poll)s 폴링" - -#, python-format -msgid "Publishing policy set to %s" -msgstr "공개 정책이 %s(으)로 설정됨" - -msgid "Reconfiguring polling tasks." -msgstr "폴링 작업을 재구성합니다." - -msgid "Reloading notification agent and listeners." 
-msgstr "알림 에이전트와 리스너를 다시 로드합니다." - -#, python-format -msgid "Skip pollster %(name)s, no %(p_context)sresources found this cycle" -msgstr "Pollster %(name)s 건너뛰기, %(p_context)s 자원에서 이 주기를 발견함" - -#, python-format -msgid "Starting server in PID %s" -msgstr "PID %s의 서버 시작" - -msgid "detected decoupled pipeline config format" -msgstr "비결합 파이프라인 구성 형식 발견" - -#, python-format -msgid "metering data %(counter_name)s for %(resource_id)s: %(counter_volume)s" -msgstr "%(resource_id)s의 측정 데이터 %(counter_name)s: %(counter_volume)s" - -#, python-format -msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" -msgstr "0.0.0.0:%(sport)s에서 전달 중, http://127.0.0.1:%(vport)s에서 보기" - -#, python-format -msgid "serving on http://%(host)s:%(port)s" -msgstr "http://%(host)s:%(port)s에서 전달 중" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po deleted file mode 100644 index 26ff24ac..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer-log-warning.po +++ /dev/null @@ -1,155 +0,0 @@ -# Sungjin Kang , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:34+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language-Team: Korean (South Korea)\n" -"Language: ko-KR\n" -"X-Generator: Zanata 3.7.3\n" -"Plural-Forms: nplurals=1; plural=0\n" - -msgid "Can't connect to keystone, assuming aodh is disabled and retry later." -msgstr "" -"Keystone에 연결할 수 없습니다 . Aodh가 사용되지 않는다고 가정하여 나중에 다" -"시 시도합니다." - -msgid "Can't connect to keystone, assuming gnocchi is disabled and retry later" -msgstr "" -"Keystone에 연결할 수 없습니다. Gnocchi가 사용되지 않는다고 가정하여 나중에 다" -"시 시도합니다." - -msgid "" -"Cannot extract tasks because agent failed to join group properly. Rejoining " -"group." -msgstr "" -"에이전트가 적절하게 그룹을 결합하지 못했으므로 작업을 추출할 수 없습니다. 그" -"룹을 다시 결합합니다." - -#, python-format -msgid "" -"Cannot inspect data of %(pollster)s for %(instance_id)s, non-fatal reason: " -"%(exc)s" -msgstr "" -"%(instance_id)s의 %(pollster)s 데이터를 검사할 수 없습니다. 치명적이지 않은 " -"이유: %(exc)s" - -#, python-format -msgid "Dropping out of time order sample: %s" -msgstr "시간 순서 샘플에서 삭제: %s" - -#, python-format -msgid "Dropping sample with no predecessor: %s" -msgstr "선행 작업이 없는 샘플 삭제: %s" - -#, python-format -msgid "Duplicated values: %s found in CLI options, auto de-duplicated" -msgstr "중복된 값: CLI 옵션에 %s이(가) 있습니다. 자동으로 중복이 해제됩니다." - -#, python-format -msgid "Failed to load any dispatchers for %s" -msgstr "%s의 디스패처를 로드하는 데 실패" - -#, python-format -msgid "" -"Failed to parse date from set fields, both fields %(start)s and %(end)s must " -"be datetime: %(err)s" -msgstr "" -"설정 필드에서 데이터를 구문 분석하는 데 실패, 두 필드 %(start)s 와 %(end)s은" -"(는) 모두 datetime임: %(err)s" - -#, python-format -msgid "Ignore unrecognized field %s" -msgstr "인식되지 않는 필드 %s 무시" - -#, python-format -msgid "Invalid status, skipping IP address %s" -msgstr "올바르지 않은 상태, IP 주소 %s 건너뛰기" - -msgid "Negative delta detected, dropping value" -msgstr "음수의 델타가 발견되어 값을 삭제함" - -#, python-format -msgid "No endpoints found for service %s" -msgstr "%s 서비스의 엔드포인트를 찾을 수 없음" - -msgid "" -"Non-metric meters may be collected. It is highly advisable to disable these " -"meters using ceilometer.conf or the pipeline.yaml" -msgstr "" -"비측정 미터를 수집할 수 없습니다. 
celometer.conf 또는 pipeline.yaml을 사용하" -"여 이러한 미터를 사용하지 않게 설정하는 것이 좋습니다." - -#, python-format -msgid "" -"Skipping %(name)s, %(service_type)s service is not registered in keystone" -msgstr " %(name)s, %(service_type)s 서비스 건너뛰기는 keystone에 등록되지 않음" - -#, python-format -msgid "Skipping duplicate meter definition %s" -msgstr "중복 측정 정의 %s 건너뛰기" - -msgid "" -"Timedelta plugin is required two timestamp fields to create timedelta value." -msgstr "" -"Timedelta 플러그인에서 timedelta 값을 생성하려면 두 개의 시간소인 필드가 필요" -"합니다." - -msgid "" -"ceilometer-api started with aodh enabled. Alarms URLs will be redirected to " -"aodh endpoint." -msgstr "" -"Aodh가 사용된 상태로 ceilometer-api가 시작되었습니다. 알람 URL이 aodh 엔드포" -"인트로 경로가 재지정됩니다." - -msgid "" -"ceilometer-api started with gnocchi enabled. The resources/meters/samples " -"URLs are disabled." -msgstr "" -"Gnocchi를 사용한 상태로 ceilometer-api가 시작되었습니다. 자원/측정/샘플 URL" -"을 사용하지 않습니다." - -#, python-format -msgid "event signature invalid, discarding event: %s" -msgstr "이벤트 서명이 올바르지 않아 이벤트를 삭제함: %s" - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has no " -"volume (volume: None), the sample will be dropped" -msgstr "" -"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 볼륨" -"(volume: None)이 없으므로 샘플이 삭제됩니다." - -#, python-format -msgid "" -"metering data %(counter_name)s for %(resource_id)s @ %(timestamp)s has " -"volume which is not a number (volume: %(counter_volume)s), the sample will " -"be dropped" -msgstr "" -"%(resource_id)s @ %(timestamp)s의 측정 데이터 %(counter_name)s에 번호" -"(volume: %(counter_volume)s)가 아닌 볼륨이 있으므로, 샘플이 삭제됩니다." - -msgid "" -"pecan_debug cannot be enabled, if workers is > 1, the value is overrided " -"with False" -msgstr "" -"pecan_debug를 사용하도록 설정할 수 없습니다. 작업자가 > 1이면 값이 False로 겹" -"쳐씁니다." - -#, python-format -msgid "" -"split plugin is deprecated, add \".`split(%(sep)s, %(segment)d, " -"%(max_split)d)`\" to your jsonpath instead" -msgstr "" -"분할 플러그인은 더 이상 사용되지 않음, 대신 \".`split(%(sep)s, %(segment)d, " -"%(max_split)d)`\"을(를) jsonpath에 추가" - -#, python-format -msgid "unable to configure oslo_cache: %s" -msgstr "oslo_cache를 구성할 수 없음: %s" diff --git a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po deleted file mode 100644 index fd8c29b2..00000000 --- a/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,484 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Seong-ho Cho , 2014 -# Seunghyo Chun , 2013 -# Seunghyo Chun , 2013 -# Sungjin Kang , 2013 -# Sungjin Kang , 2013 -# Andreas Jaeger , 2016. #zanata -# Sungjin Kang , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-07 03:44+0000\n" -"Last-Translator: SeYeon Lee \n" -"Language: ko-KR\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Korean (South Korea)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s을(를) 찾을 수 없음" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "'%s' 표현식에서 산술 변환기는 하나 이상의 미터를 사용해야 함" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "%(table_name)s 테이블을 작성할 수 없음, 이미 존재합니다. 오류 무시" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "%(name)s에서 오류 후 계속: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "슬레이브 호스트를 연결할 수 없음: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "XenAPI를 연결할 수 없음: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "%(id)s에 대해 CPU Util을 가져올 수 없음: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "%(id)s에 대한 메모리 사용을 가져올 수 없음: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "VM %s CPU 이용률을 가져올 수 없음" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "%s 인스턴스의 IP 주소를 얻을 수 없음" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "알림 %(type)s 삭제 중(uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"인스턴스 검색 중 libvirt에서 오류 발생: [오류 코" -"드 %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "HTTP 응답 구문 분석 중 오류 발생: %s" - -msgid "Error stopping pollster." -msgstr "의견조사자를 중지하는 중에 오류가 발생했습니다. " - -msgid "Event" -msgstr "이벤트" - -msgid "Expression evaluated to a NaN value!" -msgstr "표현식이 NaN 값으로 평가되었습니다!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "%(name)s 확장자를 가져오는 데 실패함: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"인스턴스 <이름=%(name)s, id=%(id)s>의 데이터 검사 실패, 도메인 상태가 SHUTOFF" -"입니다." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"%(instance_uuid)s의 메모리 사용량 검사 실패, libvirt에서 정보를 가져올 수 없" -"음: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"인스턴스 <이름=%(name)s, id=%(id)s>의 메모리 사용량 검사 실패, libvirt에서 정" -"보를 가져올 수 없습니다." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "%s의 알림 핸들러 로드 실패" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "시간소인 값 %s 구문 분석 실패" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "%d 데이터포인트 공개 실패. 이를 삭제하는 중" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "%d 데이터포인트 공개 실패. 
이를 큐에 대기시킴" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "필터 표현식이 올바르지 않음: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "인스턴스 %(name)s (%(instance_id)s) 무시 중: %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "인스턴스 %(name)s 무시 중: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "로드 밸런서 %(loadbalancer_id)s 무시" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "풀 %(pool_id)s 무시" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"다음에서 정의 파일 %(file)s의 올바르지 않은 YAML 구문: 행: %(line)s, 열: " -"%(column)s" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "올바르지 않은 기간 %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "특성 %(trait)s에 대한 올바르지 않은 특성 유형 '%(type)s'" - -msgid "Limit must be positive" -msgstr "제한 값은 양수여야 합니다." - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "ID가 %s인 둘 이상의 이벤트가 스토리지 드라이버에서 리턴됨" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "여러 VM %s을(를) XenServer에서 찾음 " - -msgid "Must specify connection_url, and connection_password to use" -msgstr "사용할 connection_url 및 connection_password를 지정해야 함 " - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "%(name)s에 대해 %(plugin)s(이)라는 플러그인을 사용할 수 없음" - -msgid "Node Manager init failed" -msgstr "노드 관리자 초기화 실패" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "%(aspect)s %(id)s에 대한 액세스 권한이 부여되지 않음" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylitght API가 %(status)s 리턴: %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API가 %(status)s 리턴: %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"연산자 %(operator)s이(가) 지원되지 않습니다. 필드 %(field)s에는 등호 연산자" -"만 사용할 수 있습니다." - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"연산자 %(operator)s이(가) 지원되지 않습니다. 지원되는 연산자는 %(supported)s" -"입니다. " - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Order-by 표현식이 올바르지 않음: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -" %(name)s에 대한 JSONPath 스펙 '%(jsonpath)s'의 구문 분석 오류: %(err)s" - -msgid "Period must be positive." -msgstr "기간은 양수여야 합니다. 
" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 %(status)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "파이프라인 %(pipeline)s: 공개자 %(pub)s에서 오류 후 계속" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "파이프라인 %(pipeline)s: 변환기 %(trans)s을(를) 비우는 중 오류 발생" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "파이프라인 %(pipeline)s: %(smp)s의 변환기 %(trans)s에서 오류 후 종료" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "플러그인이 지정되지 않았지만, %s에 플러그인 이름이 제공되지 않음" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "폴링 %(mtr)s 센서가 %(cnt)s번 실패했습니다!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "폴링 %(name)s이(가) %(cnt)s번 실패했습니다!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s의 의견조사자가 사용 안함으로 설정되어 있습니다!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"의견조사자 %(name)s이(가) 소스 %(source)s를 더 이상 폴링하지 않도록 하십시오!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "공개자 최대 local_queue 길이가 초과됨. %d 가장 오래된 샘플 삭제 중" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "공개 정책을 알 수 없음(%s). 기본값으로 강제 설정함" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API가 %(status)s %(reason)s을(를) 리턴함" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "요청이 NorthBound REST API로 OpenDaylight에 연결하는 데 실패함" - -#, python-format -msgid "Required field %s not specified" -msgstr "필수 필드 %s이(가) 지정되지 않음" - -msgid "Resource" -msgstr "리소스" - -msgid "Sample" -msgstr "샘플" - -msgid "Samples should be included in request body" -msgstr "샘플이 요청 본문에 포함되어야 함" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "%s 확장자 로드 건너뛰기" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "문자열 %s이(가) 올바른 등시간이 아님" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "샘플과 gnocchi resources/ 메트릭 간 맵핑을 정의하는 Yaml 파일" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"데이터 유형 %(type)s이(가) 지원되지 않습니다. 지원되는 데이터 유형 목록은 " -"%(supported)s입니다." - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s에 'fields' 필드 필요" - -msgid "The path for the file publisher is required" -msgstr "파일 공개자의 경로가 필요함" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr " UDP: %s이(가) 보낸 데이터를 해독할 수 없습니다" - -msgid "UDP: Unable to store meter" -msgstr "UDP: 측정을 저장할 수 없습니다" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "데이터베이스 서버에 연결할 수 없음: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "%(value)s 값을 예상 데이터 유형 %(type)s(으)로 변환할 수 없습니다." 
- -#, python-format -msgid "Unable to discover resources: %s" -msgstr "자원을 검색할 수 없음: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "%(expr)s 표현식을 평가할 수 없음: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "공개자 %s을(를) 로드할 수 없음" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "하이퍼바이저 검사기를 로드할 수 없음: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"%(retries)d회 재시도한 이후에는 1차 mongodb에 다시 연결할 수 없습니다. 포기하" -"는 중입니다." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"1차 mongodb에 다시 연결할 수 없음: %(errmsg)s. %(retry_interval)d초 후에 다" -"시 시도합니다." - -msgid "Unable to send sample over UDP" -msgstr "UDP를 통해 샘플을 전송할 수 없음" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"%(value)s을(를) 예상된 데이터 유형으로 변환하는 중에 예상치 않은 예외 발생 " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "알 수 없는 검색 확장자: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "알 수 없는 메타데이터 유형입니다. 키(%s)를 조회할 수 없습니다." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"로드 밸런서 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"fw %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "리스너 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"멤버 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"풀 %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"vip %(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플을 건너뛰는 중" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "vpn%(id)s에서 알 수 없는 상태 %(stat)s이(가) 수신됨. 샘플 건너뛰기" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s을(를) VMware vSphere에서 찾을 수 없음" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "VM %s을(를) XenServer에서 찾을 수 없음 " - -msgid "Wrong sensor type" -msgstr "잘못된 센서 유형" - -msgid "XenAPI not installed" -msgstr "XenAPI가 설치되지 않음" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "정의 파일 %(file)s을(를) 읽는 중에 YAML 오류 발생" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "" -"Aodh를 사용하지 않게 설정하거나 사용할 수 없는 경우 경보 URL을 사용할 수 없습" -"니다." - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "%(id)s의 CPU 시간을 가져올 수 없음: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "Gnocchi를 사용할 때 직접 옵션은 true일 수 없습니다." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "시간 순서 샘플에서 벗어남: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "선행 작업이 없는 샘플 삭제: %s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 출력 길이 불일치" - -msgid "max_bytes and backup_count should be numbers." 
-msgstr "max_bytes 및 backup_count는 숫자여야 합니다." - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"IPMI 센서 데이터 구문 분석에 실패했음, 제공된 입력에서 검색된 데이터가 없음" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "IPMI 센서 데이터 구문 분석에 실패했음, 알 수 없는 센서 유형" - -msgid "running ipmitool failure" -msgstr "ipmitool 실행 실패" diff --git a/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po b/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 5afe8033..00000000 --- a/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,492 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Gabriel Wainer, 2013 -# Gabriel Wainer, 2013 -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-22 11:05+0000\n" -"Last-Translator: Carlos Marques \n" -"Language: pt-BR\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Portuguese (Brazil)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s Não Encontrada" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"O transformador aritmético deve usar pelo menos um medidor na expressão '%s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Não é possível criar a tabela %(table_name)s; ela já existe. Ignorando erro" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Continuar após erro de %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Não foi possível conectar-se ao host escravo: %s" - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Não foi possível conectar-se ao XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "Não foi possível obter Uso de CPU para %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "Não foi possível obter de Uso de Memória para %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "Não foi possível obter a utilização de CPU da máquina virtual %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Não foi possível obter o endereço IP da instância %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Descartando Notificação %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Erro de libvirt ao consultar instância : [Código " -"de Erro %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Erro ao analisar a resposta de HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Erro ao parar pesquisador. " - -msgid "Event" -msgstr "Evento" - -msgid "Expression evaluated to a NaN value!" -msgstr "Expressão avaliada para um valor NaN!" 
- -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Falha ao importar extensão para %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance <name=%(name)s, id=%(id)s>, domain state " -"is SHUTOFF." -msgstr "" -"Falha ao inspecionar os dados da instância , " -"estado do domínio é SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Falha ao inspecionar o uso da memória de %(instance_uuid)s, não é possível " -"obter informações a partir de libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance <name=%(name)s, id=%(id)s>, can " -"not get info from libvirt." -msgstr "" -"Falha ao inspecionar o uso da memória da instância , não é possível obter informações a partir de libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Falha ao carregar qualquer manipulador de notificações para %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Falha ao analisar o valor do registro de data e hora %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Falha ao publicar %d pontos de dados, descartando-os" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Falha ao publicar %d pontos de dados, enfileire-os" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Expressão de filtro inválida: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Ignorando a instância %(name)s (%(instance_id)s): %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Ignorando a instância %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Ignorando loadbalancer %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Ignorando conjunto%(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Sintaxe YAML inválida no arquivo de definições %(file)s na linha: %(line)s, " -"coluna: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Período inválido %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Tipo de traço inválido '%(type)s' para traço %(trait)s" - -msgid "Limit must be positive" -msgstr "Limite deve ser positivo" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "" -"Mais de um evento com o ID %s retornado a partir do driver de armazenamento" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Várias máquinas virtuais %s localizadas no XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "connection_url e connection_password devem ser especificados para uso" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Nenhum plug-in nomeado %(plugin)s disponível para %(name)s" - -msgid "Node Manager init failed" -msgstr "Inicialização do gerenciador de nó com falha" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Não Autorizado a acessar %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "API OpenDaylitght retornou %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "A API Opencontrail retornou%(status)s%(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"O operador %(operator)s não é suportado. Somente operador de igualdade está " -"disponível para o campo %(field)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"O operador %(operator)s não é suportado. Os operadores suportados são: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Expressão solicitada inválida: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Erro de análise na especificação JSONPath '%(jsonpath)s' para %(name)s: " -"%(err)s" - -msgid "Period must be positive." -msgstr "Período deve ser positivo." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: %(status)s após erro do publicador %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Pipeline %(pipeline)s: Continue após erro do publicador %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Pipeline %(pipeline)s: Erro ao limpar transformador %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Pipeline %(pipeline)s: Saia após erro do transformador %(trans)s para %(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Plug-in especificado, mas nenhum nome de plug-in fornecido para %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "O sensor de pesquisa %(mtr)s falhou para %(cnt)s vezes!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "A pesquisa %(name)s falhou para %(cnt)s vezes!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "O pesquisador para %s está desativado!" 
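
For context on how catalogs like this pt_BR one are reached from the code: OpenStack projects of this era wired translation through oslo.i18n rather than calling gettext directly. A hedged sketch of that wiring, assuming the standard oslo.i18n public API and the 'ceilometer' translation domain; this is illustrative, not code removed by this patch, and it requires the oslo.i18n package:

    import oslo_i18n

    # Assumed-standard oslo.i18n wiring keyed on the translation domain.
    _translators = oslo_i18n.TranslatorFactory(domain='ceilometer')
    _ = _translators.primary        # user-facing strings (ceilometer.po)
    _LE = _translators.log_error    # error-log strings, separate catalog

    # Lazy mode defers rendering until the message is serialized, so the
    # consuming side can pick the locale.
    oslo_i18n.enable_lazy()
    print(_("Limit must be positive"))
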
- -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Evite o pesquisador %(name)s para a origem de pesquisa %(source)s atualmente!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Comprimento máximo de local_queue do publicador foi excedido, descartando %d " -"amostras antigas" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "Publicando política desconhecida (%s) força para o padrão" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "A API AdminOps RGW retornou %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "O pedido falhou ao conectar-se ao OpenDaylight com API REST NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Campo obrigatório %s não especificado" - -msgid "Resource" -msgstr "Recurso" - -msgid "Sample" -msgstr "Amostra" - -msgid "Samples should be included in request body" -msgstr "As amostras devem ser incluídas no corpo da solicitação" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Ignorar a extensão de carregamento para %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "Sequência %s não é um isotime válido" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"O arquivo Yaml que define o mapeamento entre amostras e recursos gnocchi/" -"métrica" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"O tipo de dados %(type)s não é suportado. A lista de tipos de dados " -"suportados é: %(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "O campo 'fields' é necessário para %s" - -msgid "The path for the file publisher is required" -msgstr "O caminho para o publicador do arquivo é necessário" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: Não pode decodificar dados enviados por %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: Não é possível armazenar medida" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Não é possível conectar-se ao servidor de banco de dados: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Não é possível converter o valor %(value)s para o tipo de dados esperado " -"%(type)s." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Não é possível descobrir recursos: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Não é possível avaliar expressão %(expr)s:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Impossível carregar publicador %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Impossível carregar o inspetor do hypervisor: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Não é possível se reconectar ao mongodb primário após %(retries)d novas " -"tentativas. Desistindo." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Não é possível se reconectar ao mongodb primário: %(errmsg)s. 
Tentando " -"novamente em %(retry_interval)d segundos." - -msgid "Unable to send sample over UDP" -msgstr "Não é possível enviar amostra sobre UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Exceção inesperada convertendo %(value)s para o tipo de dado esperado " -"%(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Extensão de descoberta desconhecida: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Tipo de metada desconhecido. Chave (%s) não será consultável." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no Balanceador de Carga %(id)s, " -"ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido na largura da fonte %(id)s, ignorando " -"a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no listener %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no membro %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido no conjunto %(id)s, ignorando amostras" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido em vip %(id)s, ignorando a amostra" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"Status desconhecido %(stat)s recebido recebido no vpn %(id)s, ignorando a " -"amostra" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "VM %s não localizado no VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "Máquina virtual %s não localizada no XenServer" - -msgid "Wrong sensor type" -msgstr "Tipo de sensor errado" - -msgid "XenAPI not installed" -msgstr "XenAPI não instalado" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Erro YAML ao ler o arquivo de definições %(file)s" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "não pôde obter o tempo de CPU para %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "A opção direta não pode ser true quando o Gnocchi está ativado. " - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "eliminando amostra fora de ordem de tempo: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "descartando amostra sem predecessor: %s" - -msgid "ipmitool output length mismatch" -msgstr "incompatibilidade no comprimento da saída de ipmitool" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes e backup_count devem ser números." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"análise dos dados do sensor IPMI com falha, nenhum dado recuperado da " -"entrada fornecida" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "análise dos dados do sensor IPMI com falha,tipo de sensor desconhecido" - -msgid "running ipmitool failure" -msgstr "executando falha de ipmitool" diff --git a/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po b/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 98c2a275..00000000 --- a/ceilometer/locale/ru/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,495 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Andreas Jaeger , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-10 08:01+0000\n" -"Last-Translator: Grigory Mokhin \n" -"Language: ru\n" -"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" -"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" -"%100>=11 && n%100<=14)? 2 : 3);\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Russian\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "%(entity)s %(id)s не найден" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "" -"Арифметический преобразователь должен использовать хотя бы один счетчик в " -"выражении %s'" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "" -"Не удалось создать таблицу %(table_name)s: уже существует. Игнорирование " -"ошибки" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "Продолжить после ошибки с %(name)s: %(error)s" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "Не удалось подключиться к подчиненному хосту: %s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "Не удалось подключиться к XenAPI: %s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "" -"Не удалось получить информацию об использовании процессора для %(id)s: %(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "" -"Не удалось получить информацию об использовании памяти для %(id)s: %(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "" -"Не удалось получить информацию об использовании CPU для виртуальной машины %s" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "Не удалось получить IP-адрес экземпляра %s" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "Удаление уведомления %(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"Возникла ошибка в libvirt при поиске экземпляра <имя=%(name)s, ИД=%(id)s>: " -"[Код ошибки: %(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "Ошибка анализа ответа HTTP: %s" - -msgid "Error stopping pollster." -msgstr "Ошибка остановки опрашивающего объекта." 
- -msgid "Event" -msgstr "Событие" - -msgid "Expression evaluated to a NaN value!" -msgstr "Результат вычисления выражения - значение NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "Не удалось импортировать расширение для %(name)s: %(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "" -"Не удалось проверить данные экземпляра <имя=%(name)s, ИД=%(id)s>, состояние " -"домена - SHUTOFF." - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"Не удалось проверить использование памяти экземпляром %(instance_uuid)s, не " -"удалось получить информацию от libvirt: %(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"Не удалось проверить использование памяти экземпляром <имя=%(name)s, ИД=" -"%(id)s>, не удалось получить информацию от libvirt." - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "Не удалось загрузить обработчики уведомлений для %s" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "Ошибка анализа значения времени %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "Не удалось опубликовать %d точек данных, выполняется их удаление" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "Не удалось опубликовать %d точек данных, создайте для них очередь" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "Недопустимое выражение фильтра: %s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "Игнорирование экземпляра %(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "Игнорирование экземпляра %(name)s: %(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "Балансировщик нагрузки %(loadbalancer_id)s игнорируется" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "Пул %(pool_id)s игнорируется" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "" -"Недопустимый синтаксис YAML в файле определений %(file)s; строка: %(line)s, " -"столбец: %(column)s." 
- -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "Недопустимый интервал %(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "Недопустимый тип особенности %(type)s для особенности %(trait)s" - -msgid "Limit must be positive" -msgstr "Ограничение должно быть положительным" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "Из драйвера хранилища возвращено несколько событий с ИД %s" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "Найдено несколько виртуальных машин %s в XenServer" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "Необходимо указать connection_url и connection_password" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "Нет доступного модуля %(plugin)s для %(name)s" - -msgid "Node Manager init failed" -msgstr "Сбой инициализации администратора узлов" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "Нет прав доступа к %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "Функция API OpenDaylight вернула %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "API Opencontrail возвратил %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "" -"Оператор %(operator)s не поддерживается. Для поля %(field)s возможен только " -"оператор равенства" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "" -"Оператор %(operator)s не поддерживается. Поддерживаемые операторы: " -"%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "Недопустимое выражение сортировки: %s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "" -"Ошибка анализа спецификации JSONPath %(jsonpath)s для %(name)s: %(err)s" - -msgid "Period must be positive." -msgstr "Период должен быть положительным." - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "Конвейер %(pipeline)s: %(status)s после ошибки от публикатора %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "Конвейер %(pipeline)s: Продолжение после ошибки из публикатора %(pub)s" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "Конвейер %(pipeline)s: Ошибка выгрузки преобразователя %(trans)s" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "" -"Конвейер %(pipeline)s: Выход после ошибки из преобразователя %(trans)s для " -"%(smp)s" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "Указан модуль, но не передано имя модуля для %s" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "%(cnt)s-кратный сбой датчика опроса %(mtr)s!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "Опрос %(name)s не удалось выполнить %(cnt)s раз." - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "Опрашивающий объект для %s выключен!" 
- -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "" -"Сделайте так, чтобы опрашивающий объект %(name)s больше не опрашивал " -"источник %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "" -"Превышена максимальная длина local_queue публикатора, удаление %d самых " -"старых образцов" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "Стратегия публикации неизвестна (%s). По умолчанию принудительная" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "Функция API RGW AdminOps вернула %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "Сбой запроса на подключение к OpenDaylight с API REST NorthBound" - -#, python-format -msgid "Required field %s not specified" -msgstr "Не указано обязательное поле %s" - -msgid "Resource" -msgstr "Ресурс" - -msgid "Sample" -msgstr "Образец" - -msgid "Samples should be included in request body" -msgstr "Образцы должны включаться в тело запроса" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "Пропустить загрузку расширения для %s" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "Строка %s не является допустимым значением isotime" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "" -"Файл Yaml, определяющий связи между образцами и ресурсами gnocchi " -"(показателями)" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "" -"Тип данных %(type)s не поддерживается. Список поддерживаемых типов данных: " -"%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "Поле 'fields' является обязательным для %s" - -msgid "The path for the file publisher is required" -msgstr "Требуется путь для публикатора файлов" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: не удается декодировать данные, отправленные %s" - -msgid "UDP: Unable to store meter" -msgstr "UDP: не удалось сохранить счетчик" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "Не удалось подключиться к серверу базы данных: %(errmsg)s." - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "" -"Преобразовать значение %(value)s в ожидаемый тип данных %(type)s невозможно." - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "Не удалось найти ресурсы: %s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "Вычислить выражение %(expr)s невозможно: %(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "Не удалось загрузить публикатор %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "Не удалось загрузить инспектор гипервизора: %s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." -msgstr "" -"Не удалось повторно подключиться к основной базе данных mongodb после " -"%(retries)d попыток. Дальнейшие попытки прекращены." - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"Не удалось повторно подключиться к основной mongodb: %(errmsg)s. 
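
Since any of these python-format entries breaks at runtime if a translation drops or renames a placeholder, catalogs are usually guarded by a consistency check like the following; this helper is hypothetical, not something the patch removes:

    import re

    # Hypothetical QA helper: a msgstr must keep exactly the named
    # placeholders of its msgid, or %-substitution raises KeyError.
    PLACEHOLDER = re.compile(r'%\((\w+)\)[sdf]')

    def placeholders_match(msgid, msgstr):
        return set(PLACEHOLDER.findall(msgid)) == set(PLACEHOLDER.findall(msgstr))

    assert placeholders_match(
        "Invalid period %(period)s: %(err)s",
        "Недопустимый интервал %(period)s: %(err)s")
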
Повторное " -"подключение через %(retry_interval)d секунд." - -msgid "Unable to send sample over UDP" -msgstr "Не удалось отправить образец по UDP" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "" -"Возникла непредвиденная исключительная ситуация при преобразовании %(value)s " -"в ожидаемый тип данных %(type)s." - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "Неизвестное расширение поиска: %s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "Неизвестный тип метаданных. Ключ (%s) нельзя будет запрашивать." - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "" -"В балансировщике нагрузки %(id)s получено неизвестное состояние %(stat)s, " -"пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "" -"В fw %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "" -"В обработчике %(id)s получено неизвестное состояние %(stat)s, пример " -"пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "" -"В участнике %(id)s получено неизвестное состояние %(stat)s, пример " -"пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "" -"В пуле %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "" -"В vip %(id)s получено неизвестное состояние %(stat)s,пример пропускается" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "" -"В VPN %(id)s получено неизвестное состояние %(stat)s, пример пропускается" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "Виртуальная машина %s не найдена в VMware vSphere" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "Не найдена виртуальная машина %s в XenServer" - -msgid "Wrong sensor type" -msgstr "Неверный тип датчика" - -msgid "XenAPI not installed" -msgstr "XenAPI не установлен" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "Ошибка YAML при чтении файла определений %(file)s" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "не удалось получить процессорное время для %(id)s: %(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "Параметр direct не может быть равен true, если включен Gnocchi." - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "удаление образца, выпадающего из хронологического порядка: %s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "удаление образца без предшественника: %s" - -msgid "ipmitool output length mismatch" -msgstr "несоответствие длины вывода ipmitool" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes и backup_count должны быть числами." 
- -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "" -"сбой анализа данных датчика IPMI, не получены данные из переданного ввода" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "сбой анализа данных датчика IPMI, неизвестный тип датчика" - -msgid "running ipmitool failure" -msgstr "сбой выполнения ipmitool" diff --git a/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po b/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po deleted file mode 100644 index 93311b3e..00000000 --- a/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,465 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# aji.zqfan , 2015 -# yelu , 2013 -# Tom Fifield , 2013 -# 颜海峰 , 2014 -# yelu , 2013 -# Yu Zhang, 2013 -# Yu Zhang, 2013 -# 颜海峰 , 2014 -# English translations for ceilometer. -# Andreas Jaeger , 2016. #zanata -# Linda , 2016. #zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-31 03:32+0000\n" -"Last-Translator: Linda \n" -"Language: zh-CN\n" -"Language-Team: Chinese (China)\n" -"Plural-Forms: nplurals=1; plural=0\n" -"Generated-By: Babel 2.2.0\n" -"X-Generator: Zanata 3.7.3\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "无法找到%(entity)s %(id)s " - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "算术变形器在表达式'%s'中必须至少使用一个指标" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "表%(table_name)s已经存在,无法创建。忽略此错误继续执行。" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "遇到错误%(name)s:%(error)s,继续执行" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "无法连接伺服主机:%s " - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "无法连接到XenAPI:%s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "无法为虚拟机%(id)s获取CPU使用率:%(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "无法为%(id)s获取内存使用信息:%(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "无法获取虚拟机%s的CPU使用率" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "无法为实例%s获取IP地址" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "正在丢弃通知%(type)s (uuid:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"查找实例 <名称为 %(name)s,标识为 %(id)s> 时,libvirt 中出错:[错误代码 " -"%(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "解析HTTP响应失败: %s" - -msgid "Error stopping pollster." -msgstr "停止轮询程序时出错。" - -msgid "Event" -msgstr "事件" - -msgid "Expression evaluated to a NaN value!" -msgstr "表达式计算结果为NaN!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "无法为%(name)s引入扩展:%(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." 
-msgstr "" -"为虚拟机获取监控数据失败了,虚拟机状态为SHUTOFF" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"为虚拟机%(instance_uuid)s采集内存使用指标失败了,无法从libvirt获取信息:" -"%(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." -msgstr "" -"为虚拟机采集内存使用指标失败了,无法从libvirt获取信" -"息。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "无法为%s加载任何通知处理器" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "解析时间戳%s失败" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "发布%d个数据点时失败,正在将其丢弃" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "发布%d个数据点时失败,将其入队" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "过滤表达式不合法:%s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "忽略虚拟机%(name)s (%(instance_id)s) : %(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "忽略虚拟机%(name)s:%(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "正在忽略负载均衡器 %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "正在忽略池 %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "定义文件%(file)s中有非法YAML语法,行:%(line)s,列%(column)s。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "非法的间隔%(period)s: %(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特征%(trait)s包含了不合法的特征类型'%(type)s' " - -msgid "Limit must be positive" -msgstr "limit必须是正数" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "从数据库返回了多个id为%s的事件" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "多个虚拟机%s在XenServer中被找到" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "使用时必须指定connection_url和connection_password" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "未对 %(name)s 提供名为 %(plugin)s 的插件" - -msgid "Node Manager init failed" -msgstr "节点管理器初始化失败" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "权限不足以访问%(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight接口返回状态%(status)s,原因%(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail接口返回状态%(status)s,原因%(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "运算符 %(operator)s 不受支持。对于字段 %(field)s,只能使用等号运算符" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "运算符 %(operator)s 不受支持。受支持的运算符为:%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "orderby表达式不合法:%s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "对 %(name)s 指定的 JSONPath(即“%(jsonpath)s”)存在解析错误:%(err)s" - -msgid "Period must be positive." 
-msgstr "period 参数必须是正数" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "" -"管道 %(pipeline)s:在发布程序 %(pub)s 中发生错误之后,处于 %(status)s 状态" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "流水线%(pipeline)s:发布器%(pub)s报错,继续执行" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "流水线%(pipeline)s:变形器%(trans)s清空数据时出错" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "流水线%(pipeline)s:数据%(smp)s的变形器%(trans)s遇到错误,退出" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "指定了插件,但未对 %s 提供插件名" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "拉取%(mtr)s传感器失败了%(cnt)s次!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "轮询 %(name)s 已失败 %(cnt)s 次!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "%s的采集器被禁用" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "请阻止轮询程序 %(name)s 再轮询源 %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "发布的数据量超过本地队列最大长度,正在丢弃最老的%d个数据" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "未知的发布策略(%s),强制使用默认策略" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps接口返回%(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "请求无法连接到OpenDaylight的北向REST接口" - -#, python-format -msgid "Required field %s not specified" -msgstr "必填项%s没有填写" - -msgid "Resource" -msgstr "资源" - -msgid "Sample" -msgstr "数据" - -msgid "Samples should be included in request body" -msgstr "样本应包括在请求主体中" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "跳过为%s加载扩展" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "字符串%s不是个合法的标准时间格式" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "用于定义样本与 gnocchi 资源/度量值之间的映射的Yaml 文件" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "数据类型%(type)s不被支持。支持的数据类型列表:%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s 需要字段“fields”" - -msgid "The path for the file publisher is required" -msgstr "文件发布器必须设置文件路径" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP: 无法解码由 %s 发送的数据" - -msgid "UDP: Unable to store meter" -msgstr "UDP: 无法存储计量器" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "无法连接到数据库服务器:%(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "无法转换%(value)s到预期的数据类型%(type)s。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "无法发现资源:%s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "无法计算表达式%(expr)s:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "无法加载发布器%s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "无法加载管理程序的探测器:%s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." 
-msgstr "在%(retries)d次尝试后仍无法重连到MongoDB主节点。放弃重连。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"无法重连到MongoDB主节点:%(errmsg)s。在%(retry_interval)d秒后进行重试。" - -msgid "Unable to send sample over UDP" -msgstr "无法通过UDP发送采样" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "在转换%(value)s到预期的数据类型%(type)s时发生了未预料的异常。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "未知的发现器插件:%s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "未知的元数据类型。键(%s)将无法进行查询。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "在负载均衡器 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "从fw %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "在侦听器 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "在成员 %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "从pool %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "从vip %(id)s收到未知的状态%(stat)s,跳过该采样数据" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "在 VPN %(id)s 上接收到未知状态 %(stat)s,正在跳过样本" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "在 VMware vSphere 中,找不到 VM %s" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "无法在XenServer中找到虚拟机%s" - -msgid "Wrong sensor type" -msgstr "错误的传感器类型" - -msgid "XenAPI not installed" -msgstr "XenAPI没有安装" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "读取定义文件%(file)s时遇到YAML错误" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "Aodh 被禁用或不可用时,警报 URL 不可用。" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "无法为虚拟机%(id)s获取CPU时间:%(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "启用 Gnocchi 后,direct 选项不能为 true。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "正在退出时间顺序样本:%s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "因为之前没有数据(用来计算差值)因而丢弃数据:%s" - -msgid "ipmitool output length mismatch" -msgstr "ipmi输出长度不匹配" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes和backup_count必须是整数。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "解析IPMI传感器数据失败,从给定的输入中无法检索到数据" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "解析IPMI传感器数据失败,未知的传感器类型" - -msgid "running ipmitool failure" -msgstr "运行ipmitool时失败了" diff --git a/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po b/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po deleted file mode 100644 index edfa9b08..00000000 --- a/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po +++ /dev/null @@ -1,455 +0,0 @@ -# Translations template for ceilometer. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the ceilometer project. -# -# Translators: -# Stefano Maffulli , 2013 -# Andreas Jaeger , 2016. #zanata -# Jennifer , 2016. 
#zanata -msgid "" -msgstr "" -"Project-Id-Version: ceilometer 6.0.1.dev170\n" -"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-07 17:37+0000\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-04-15 06:03+0000\n" -"Last-Translator: Jennifer \n" -"Language: zh-TW\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"Generated-By: Babel 2.0\n" -"X-Generator: Zanata 3.7.3\n" -"Language-Team: Chinese (Taiwan)\n" - -#, python-format -msgid "%(entity)s %(id)s Not Found" -msgstr "找不到 %(entity)s %(id)s" - -#, python-format -msgid "Arithmetic transformer must use at least one meter in expression '%s'" -msgstr "在表示式 '%s' 中,算術轉換器必須至少使用一種計量" - -#, python-format -msgid "Cannot create table %(table_name)s it already exists. Ignoring error" -msgstr "無法建立表格 %(table_name)s,該表格已經存在。將忽略錯誤" - -#, python-format -msgid "Continue after error from %(name)s: %(error)s" -msgstr "在 %(name)s 傳回錯誤 %(error)s 後繼續" - -#, python-format -msgid "Could not connect slave host: %s " -msgstr "無法連接附屬主機:%s" - -#, python-format -msgid "Could not connect to XenAPI: %s" -msgstr "無法連接 XenAPI:%s" - -#, python-format -msgid "Could not get CPU Util for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的 CPU 使用率:%(e)s" - -#, python-format -msgid "Could not get Memory Usage for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的記憶體用量:%(e)s" - -#, python-format -msgid "Could not get VM %s CPU Utilization" -msgstr "無法取得 VM %s CPU 使用率" - -#, python-format -msgid "Couldn't obtain IP address of instance %s" -msgstr "無法取得實例 %s 的 IP 位址" - -#, python-format -msgid "Dropping Notification %(type)s (uuid:%(msgid)s)" -msgstr "正在捨棄通知 %(type)s(UUID:%(msgid)s)" - -#, python-format -msgid "" -"Error from libvirt while looking up instance : " -"[Error Code %(error_code)s] %(ex)s" -msgstr "" -"查閱實例 <名稱=%(name)s,ID=%(id)s> 時,libvirt 中發生錯誤:[錯誤碼 " -"%(error_code)s] %(ex)s" - -#, python-format -msgid "Error parsing HTTP response: %s" -msgstr "剖析 HTTP 回應時發生錯誤:%s" - -msgid "Error stopping pollster." -msgstr "停止 pollster 時發生錯誤。" - -msgid "Event" -msgstr "事件" - -msgid "Expression evaluated to a NaN value!" -msgstr "表示式已求值為非數字值!" - -#, python-format -msgid "Failed to import extension for %(name)s: %(error)s" -msgstr "無法匯入 %(name)s 的延伸:%(error)s" - -#, python-format -msgid "" -"Failed to inspect data of instance , domain state " -"is SHUTOFF." -msgstr "無法檢查實例 <名稱=%(name)s,ID=%(id)s> 的資料,網域狀態為 SHUTOFF。" - -#, python-format -msgid "" -"Failed to inspect memory usage of %(instance_uuid)s, can not get info from " -"libvirt: %(error)s" -msgstr "" -"無法檢查 %(instance_uuid)s 的記憶體用量,無法從 libVirt 取得資訊:%(error)s" - -#, python-format -msgid "" -"Failed to inspect memory usage of instance , can " -"not get info from libvirt." 
-msgstr "" -"無法檢查實例 <名稱=%(name)s,ID=%(id)s> 的記憶體用量,無法從 libVirt 取得資" -"訊。" - -#, python-format -msgid "Failed to load any notification handlers for %s" -msgstr "無法載入 %s 的任何通知處理程式" - -#, python-format -msgid "Failed to parse the timestamp value %s" -msgstr "無法剖析時間戳記值 %s" - -#, python-format -msgid "Failed to publish %d datapoints, dropping them" -msgstr "無法發佈 %d 個資料點,正在捨棄它們" - -#, python-format -msgid "Failed to publish %d datapoints, queue them" -msgstr "無法發佈 %d 個資料點,正在將它們排入佇列" - -#, python-format -msgid "Filter expression not valid: %s" -msgstr "過濾表示式無效:%s" - -#, python-format -msgid "Ignoring instance %(name)s (%(instance_id)s) : %(error)s" -msgstr "正在忽略實例 %(name)s (%(instance_id)s):%(error)s" - -#, python-format -msgid "Ignoring instance %(name)s: %(error)s" -msgstr "正在忽略實例 %(name)s:%(error)s" - -#, python-format -msgid "Ignoring loadbalancer %(loadbalancer_id)s" -msgstr "正在忽略負載平衡器 %(loadbalancer_id)s" - -#, python-format -msgid "Ignoring pool %(pool_id)s" -msgstr "正在忽略儲存區 %(pool_id)s" - -#, python-format -msgid "" -"Invalid YAML syntax in Definitions file %(file)s at line: %(line)s, column: " -"%(column)s." -msgstr "定義檔 %(file)s 第 %(line)s 行第 %(column)s 列中的 YAML 語法無效。" - -#, python-format -msgid "Invalid period %(period)s: %(err)s" -msgstr "期間 %(period)s 無效:%(err)s" - -#, python-format -msgid "Invalid trait type '%(type)s' for trait %(trait)s" -msgstr "特徵 %(trait)s 的特徵類型 '%(type)s' 無效" - -msgid "Limit must be positive" -msgstr "限制值必須是正數" - -#, python-format -msgid "More than one event with id %s returned from storage driver" -msgstr "從儲存體驅動程式傳回了多個 ID 為 %s 的事件" - -#, python-format -msgid "Multiple VM %s found in XenServer" -msgstr "在 XenServer 中找到多個 VM %s" - -msgid "Must specify connection_url, and connection_password to use" -msgstr "必須指定 connection_url 和 connection_password,才能使用" - -#, python-format -msgid "No plugin named %(plugin)s available for %(name)s" -msgstr "沒有名為 %(plugin)s 的外掛程式可供 %(name)s 使用" - -msgid "Node Manager init failed" -msgstr "節點管理程式起始設定失敗" - -#, python-format -msgid "Not Authorized to access %(aspect)s %(id)s" -msgstr "未獲授權來存取 %(aspect)s %(id)s" - -#, python-format -msgid "OpenDaylitght API returned %(status)s %(reason)s" -msgstr "OpenDaylight API 傳回了 %(status)s %(reason)s" - -#, python-format -msgid "Opencontrail API returned %(status)s %(reason)s" -msgstr "Opencontrail API 傳回了 %(status)s %(reason)s" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. Only equality operator is available " -"for field %(field)s" -msgstr "運算子 %(operator)s 不受支援。只有等式運算子才可供欄位 %(field)s 使用" - -#, python-format -msgid "" -"Operator %(operator)s is not supported. The supported operators are: " -"%(supported)s" -msgstr "運算子 %(operator)s 不受支援。受支援的運算子為:%(supported)s" - -#, python-format -msgid "Order-by expression not valid: %s" -msgstr "排序方式表示式無效:%s" - -#, python-format -msgid "" -"Parse error in JSONPath specification '%(jsonpath)s' for %(name)s: %(err)s" -msgstr "%(name)s 的 JSONPath 規格 '%(jsonpath)s' 中發生剖析錯誤:%(err)s" - -msgid "Period must be positive." 
-msgstr "期間必須是正數。" - -#, python-format -msgid "Pipeline %(pipeline)s: %(status)s after error from publisher %(pub)s" -msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後處於%(status)s狀態" - -#, python-format -msgid "Pipeline %(pipeline)s: Continue after error from publisher %(pub)s" -msgstr "管線 %(pipeline)s:在發佈者 %(pub)s 傳回錯誤後繼續" - -#, python-format -msgid "Pipeline %(pipeline)s: Error flushing transformer %(trans)s" -msgstr "管線 %(pipeline)s:清除轉換器 %(trans)s 時發生錯誤" - -#, python-format -msgid "" -"Pipeline %(pipeline)s: Exit after error from transformer %(trans)s for " -"%(smp)s" -msgstr "管線 %(pipeline)s:%(smp)s 的轉換器 %(trans)s傳回錯誤後結束" - -#, python-format -msgid "Plugin specified, but no plugin name supplied for %s" -msgstr "已指定外掛程式,但卻未向 %s 提供外掛程式名稱" - -#, python-format -msgid "Polling %(mtr)s sensor failed for %(cnt)s times!" -msgstr "輪詢 %(mtr)s 感應器已失敗 %(cnt)s 次!" - -#, python-format -msgid "Polling %(name)s failed for %(cnt)s times!" -msgstr "輪詢 %(name)s 失敗了 %(cnt)s 次!" - -#, python-format -msgid "Pollster for %s is disabled!" -msgstr "已停用 %s 的 Pollster!" - -#, python-format -msgid "Prevent pollster %(name)s for polling source %(source)s anymore!" -msgstr "阻止 pollster %(name)s 再次輪詢資源 %(source)s!" - -#, python-format -msgid "" -"Publisher max local_queue length is exceeded, dropping %d oldest samples" -msgstr "已超出發佈者 local_queue 長度上限,正在捨棄 %d 個最舊的樣本" - -#, python-format -msgid "Publishing policy is unknown (%s) force to default" -msgstr "發佈原則不明 (%s),強制設為預設值" - -#, python-format -msgid "RGW AdminOps API returned %(status)s %(reason)s" -msgstr "RGW AdminOps API 傳回了 %(status)s %(reason)s" - -msgid "Request failed to connect to OpenDaylight with NorthBound REST API" -msgstr "要求無法使用 NorthBound REST API 來連接至 OpenDaylight" - -#, python-format -msgid "Required field %s not specified" -msgstr "未指定必要欄位 %s" - -msgid "Resource" -msgstr "資源" - -msgid "Sample" -msgstr "樣本" - -msgid "Samples should be included in request body" -msgstr "要求內文中應該包括範例" - -#, python-format -msgid "Skip loading extension for %s" -msgstr "跳過載入 %s 的延伸" - -#, python-format -msgid "String %s is not a valid isotime" -msgstr "字串 %s 不是有效的 ISO 時間" - -msgid "" -"The Yaml file that defines mapping between samples and gnocchi resources/" -"metrics" -msgstr "用來在範例與 gnocchi 資源/度量之間定義對映的Yaml 檔案" - -#, python-format -msgid "" -"The data type %(type)s is not supported. The supported data type list is: " -"%(supported)s" -msgstr "不支援資料類型 %(type)s。支援的資料類型清單為:%(supported)s" - -#, python-format -msgid "The field 'fields' is required for %s" -msgstr "%s 需要欄位「欄位」" - -msgid "The path for the file publisher is required" -msgstr "需要檔案發佈者的路徑" - -#, python-format -msgid "UDP: Cannot decode data sent by %s" -msgstr "UDP:無法解碼由 %s 傳送的資料" - -msgid "UDP: Unable to store meter" -msgstr "UDP:無法儲存計量" - -#, python-format -msgid "Unable to connect to the database server: %(errmsg)s." -msgstr "無法連接至資料庫伺服器:%(errmsg)s。" - -#, python-format -msgid "" -"Unable to convert the value %(value)s to the expected data type %(type)s." -msgstr "無法將值 %(value)s 轉換成預期的資料類型 %(type)s。" - -#, python-format -msgid "Unable to discover resources: %s" -msgstr "無法探索資源:%s" - -#, python-format -msgid "Unable to evaluate expression %(expr)s: %(exc)s" -msgstr "無法對表示式 %(expr)s 進行求值:%(exc)s" - -#, python-format -msgid "Unable to load publisher %s" -msgstr "無法載入發佈者 %s" - -#, python-format -msgid "Unable to load the hypervisor inspector: %s" -msgstr "無法載入 Hypervisor 檢查程式:%s" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb after %(retries)d retries. Giving " -"up." 
-msgstr "在 %(retries)d 次重試之後仍無法重新連接至主要 MongoDB。正在放棄。" - -#, python-format -msgid "" -"Unable to reconnect to the primary mongodb: %(errmsg)s. Trying again in " -"%(retry_interval)d seconds." -msgstr "" -"無法重新連接至主要 MongoDB:%(errmsg)s。請在%(retry_interval)d 秒之後再次嘗" -"試。" - -msgid "Unable to send sample over UDP" -msgstr "無法透過 UDP 來傳送樣本" - -#, python-format -msgid "" -"Unexpected exception converting %(value)s to the expected data type %(type)s." -msgstr "將 %(value)s 轉換為預期的資料類型%(type)s 時發生非預期的異常狀況。" - -#, python-format -msgid "Unknown discovery extension: %s" -msgstr "不明的探索延伸:%s" - -#, python-format -msgid "Unknown metadata type. Key (%s) will not be queryable." -msgstr "不明的 meta 資料類型。索引鍵 (%s) 將不可查詢。" - -#, python-format -msgid "" -"Unknown status %(stat)s received on Load Balancer %(id)s, skipping sample" -msgstr "在負載平衡器 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on fw %(id)s,skipping sample" -msgstr "在防火牆 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on listener %(id)s, skipping sample" -msgstr "在接聽器 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on member %(id)s, skipping sample" -msgstr "在成員 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on pool %(id)s, skipping sample" -msgstr "在儲存區 %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on vip %(id)s, skipping sample" -msgstr "在 VIP %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "Unknown status %(stat)s received on vpn %(id)s, skipping sample" -msgstr "在 VPN %(id)s 上接收到不明狀態 %(stat)s,正在跳過範例" - -#, python-format -msgid "VM %s not found in VMware vSphere" -msgstr "在 VMware vSphere 中找不到 VM %s" - -#, python-format -msgid "VM %s not found in XenServer" -msgstr "在 XenServer 中找不到 VM %s" - -msgid "Wrong sensor type" -msgstr "感應器類型錯誤" - -msgid "XenAPI not installed" -msgstr "未安裝 XenAPI" - -#, python-format -msgid "YAML error reading Definitions file %(file)s" -msgstr "讀取定義檔 %(file)s 時發生 YAML 錯誤" - -msgid "alarms URLs is unavailable when Aodh is disabled or unavailable." -msgstr "當已停用或無法使用 Aodh 時,無法使用警示 URL" - -#, python-format -msgid "could not get CPU time for %(id)s: %(e)s" -msgstr "無法取得 %(id)s 的 CPU 時間:%(e)s" - -msgid "direct option cannot be true when Gnocchi is enabled." -msgstr "已啟用 Gnocchi 時,直接選項不能為 true。" - -#, python-format -msgid "dropping out of time order sample: %s" -msgstr "正在刪除不在時間順序內的範例:%s" - -#, python-format -msgid "dropping sample with no predecessor: %s" -msgstr "正在捨棄不含前一版本的樣本:%s" - -msgid "ipmitool output length mismatch" -msgstr "ipmitool 輸出長度不符" - -msgid "max_bytes and backup_count should be numbers." -msgstr "max_bytes 及 backup_count 應該是數字。" - -msgid "parse IPMI sensor data failed,No data retrieved from given input" -msgstr "剖析 IPMI 感應器資料失敗,未從給定的輸入擷取任何資料" - -msgid "parse IPMI sensor data failed,unknown sensor type" -msgstr "剖析 IPMI 感應器資料失敗,感應器類型不明" - -msgid "running ipmitool failure" -msgstr "執行 ipmitool 失敗" diff --git a/ceilometer/messaging.py b/ceilometer/messaging.py deleted file mode 100644 index 7b325077..00000000 --- a/ceilometer/messaging.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2013-2015 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging -from oslo_messaging import serializer as oslo_serializer - -DEFAULT_URL = "__default__" -TRANSPORTS = {} - - -def setup(): - oslo_messaging.set_transport_defaults('ceilometer') - - -def get_transport(url=None, optional=False, cache=True): - """Initialise the oslo_messaging layer.""" - global TRANSPORTS, DEFAULT_URL - cache_key = url or DEFAULT_URL - transport = TRANSPORTS.get(cache_key) - if not transport or not cache: - try: - transport = oslo_messaging.get_transport(cfg.CONF, url) - except (oslo_messaging.InvalidTransportURL, - oslo_messaging.DriverLoadFailure): - if not optional or url: - # NOTE(sileht): oslo_messaging is configured but unloadable - # so reraise the exception - raise - return None - else: - if cache: - TRANSPORTS[cache_key] = transport - return transport - - -def cleanup(): - """Cleanup the oslo_messaging layer.""" - global TRANSPORTS, NOTIFIERS - NOTIFIERS = {} - for url in TRANSPORTS: - TRANSPORTS[url].cleanup() - del TRANSPORTS[url] - - -_SERIALIZER = oslo_serializer.JsonPayloadSerializer() - - -def get_batch_notification_listener(transport, targets, endpoints, - allow_requeue=False, - batch_size=1, batch_timeout=None): - """Return a configured oslo_messaging notification listener.""" - return oslo_messaging.get_batch_notification_listener( - transport, targets, endpoints, executor='threading', - allow_requeue=allow_requeue, - batch_size=batch_size, batch_timeout=batch_timeout) - - -def get_notifier(transport, publisher_id): - """Return a configured oslo_messaging notifier.""" - notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) - return notifier.prepare(publisher_id=publisher_id) - - -def convert_to_old_notification_format(priority, notification): - # FIXME(sileht): temporary convert notification to old format - # to focus on oslo_messaging migration before refactoring the code to - # use the new oslo_messaging facilities - notification = notification.copy() - notification['priority'] = priority - notification.update(notification["metadata"]) - for k in notification['ctxt']: - notification['_context_' + k] = notification['ctxt'][k] - del notification['ctxt'] - del notification['metadata'] - return notification diff --git a/ceilometer/meter/__init__.py b/ceilometer/meter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/meter/data/meters.yaml b/ceilometer/meter/data/meters.yaml deleted file mode 100644 index 650b0309..00000000 --- a/ceilometer/meter/data/meters.yaml +++ /dev/null @@ -1,815 +0,0 @@ ---- - -metric: - # Image - - name: "image.size" - event_type: - - "image.upload" - - "image.delete" - - "image.update" - type: "gauge" - unit: B - volume: $.payload.size - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.download" - event_type: "image.send" - type: "delta" - unit: "B" - volume: $.payload.bytes_sent - resource_id: $.payload.image_id - user_id: $.payload.receiver_user_id - project_id: $.payload.receiver_tenant_id - - - name: "image.serve" - event_type: "image.send" - type: "delta" - unit: "B" - volume: 
$.payload.bytes_sent - resource_id: $.payload.image_id - project_id: $.payload.owner_id - - - name: 'volume.size' - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.size - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.volume_id - - - name: 'snapshot.size' - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.volume_size - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.snapshot_id - - # Magnum - - name: $.payload.metrics.[*].name - event_type: 'magnum.bay.metrics.*' - type: 'gauge' - unit: $.payload.metrics.[*].unit - volume: $.payload.metrics.[*].value - user_id: $.payload.user_id - project_id: $.payload.project_id - resource_id: $.payload.resource_id - lookup: ['name', 'unit', 'volume'] - - # Swift - - name: $.payload.measurements.[*].metric.[*].name - event_type: 'objectstore.http.request' - type: 'delta' - unit: $.payload.measurements.[*].metric.[*].unit - volume: $.payload.measurements.[*].result - resource_id: $.payload.target.id - user_id: $.payload.initiator.id - project_id: $.payload.initiator.project_id - lookup: ['name', 'unit', 'volume'] - - - name: 'memory' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'MB' - volume: $.payload.memory_mb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'vcpus' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'vcpu' - volume: $.payload.vcpus - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'compute.instance.booting.time' - event_type: 'compute.instance.create.end' - type: 'gauge' - unit: 'sec' - volume: - fields: [$.payload.created_at, $.payload.launched_at] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'disk.root.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.root_gb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'disk.ephemeral.size' - event_type: 'compute.instance.*' - type: 'gauge' - unit: 'GB' - volume: $.payload.ephemeral_gb - user_id: $.payload.user_id - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - - - name: 'bandwidth' - event_type: 'l3.meter' - type: 'delta' - unit: 'B' - volume: $.payload.bytes - project_id: $.payload.tenant_id - resource_id: $.payload.label_id - - - name: 'compute.node.cpu.frequency' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'MHz' - volume: $.payload.metrics[?(@.name='cpu.frequency')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.frequency')].source - - - name: 'compute.node.cpu.user.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.user.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: 
$.payload.metrics[?(@.name='cpu.user.time')].source - - - name: 'compute.node.cpu.kernel.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - - - name: 'compute.node.cpu.idle.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.idle.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.idle.time')].source - - - name: 'compute.node.cpu.iowait.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' - volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - - - name: 'compute.node.cpu.kernel.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - - - name: 'compute.node.cpu.idle.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.idle.percent')].source - - - name: 'compute.node.cpu.user.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.user.percent')].source - - - name: 'compute.node.cpu.iowait.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - - - name: 'compute.node.cpu.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' - volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 - resource_id: $.payload.host + "_" + $.payload.nodename - timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp - metadata: - event_type: $.event_type - host: $.publisher_id - source: $.payload.metrics[?(@.name='cpu.percent')].source - - # 
DNS - - name: 'dns.domain.exists' - event_type: 'dns.domain.exists' - type: 'cumulative' - unit: 's' - volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.id - user_id: $._context_user - metadata: - status: $.payload.status - pool_id: $.payload.pool_id - host: $.publisher_id - - # Trove - - name: 'trove.instance.exists' - event_type: 'trove.instance.exists' - type: 'cumulative' - unit: 's' - volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' - project_id: $.payload.tenant_id - resource_id: $.payload.instance_id - user_id: $.payload.user_id - metadata: - nova_instance_id: $.payload.nova_instance_id - state: $.payload.state - service_id: $.payload.service_id - instance_type: $.payload.instance_type - instance_type_id: $.payload.instance_type_id - - -# NOTE: non-metric meters are generally events/existence meters -# These are DEPRECATED in current release and expected to be -# REMOVED in the next upcoming release. -# - # Image - - name: "image" - event_type: - - "image.upload" - - "image.update" - - "image.delete" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.upload" - event_type: - - "image.upload" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.delete" - event_type: - - "image.delete" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - - name: "image.update" - event_type: - - "image.update" - type: "gauge" - unit: 'image' - volume: 1 - resource_id: $.payload.id - project_id: $.payload.owner - - # Orchestration - - name: 'stack.create' - event_type: - - 'orchestration.stack.create.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.update' - event_type: - - 'orchestration.stack.update.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.delete' - event_type: - - 'orchestration.stack.delete.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.resume' - event_type: - - 'orchestration.stack.resume.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - - name: 'stack.suspend' - event_type: - - 'orchestration.stack.suspend.end' - type: 'delta' - unit: 'stack' - volume: 1 - user_id: _context_trustor_user_id - project_id: $.payload.tenant_id - resource_id: $.payload.stack_identity - - # Volume - - name: 'volume' - type: 'gauge' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 'volume.update.*' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.exists' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.exists' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.create.start' - type: 
'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.create.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.create.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.delete.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.delete.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.delete.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.update.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.update.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.update.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.resize.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.resize.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.resize.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - - name: 'volume.attach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.attach.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.attach.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.attach.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.detach.end' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.end' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'volume.detach.start' - type: 'delta' - unit: 'volume' - volume: 1 - event_type: - - 'volume.detach.start' - resource_id: $.payload.volume_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - # Volume Snapshot - - name: 'snapshot' - type: 'gauge' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.exists' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.exists' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.create.start' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.start' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.create.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.create.end' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.delete.start' - type: 'delta' - unit: 'snapshot' - 
volume: 1 - event_type: - - 'snapshot.delete.start' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - - name: 'snapshot.delete.end' - type: 'delta' - unit: 'snapshot' - volume: 1 - event_type: - - 'snapshot.delete.end' - resource_id: $.payload.snapshot_id - user_id: $.payload.user_id - project_id: $.payload.tenant_id - - # Sahara - - name: 'cluster.create' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.create' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - - name: 'cluster.update' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.update' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - - name: 'cluster.delete' - type: 'delta' - unit: 'cluster' - volume: 1 - event_type: - - 'sahara.cluster.delete' - resource_id: $.payload.cluster_id - project_id: $.payload.project_id - - # Identity - - name: 'identity.user.created' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.user.updated' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.user.deleted' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.user.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.created' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.updated' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.group.deleted' - type: 'delta' - unit: 'group' - volume: 1 - event_type: - - 'identity.group.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.created' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.updated' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.project.deleted' - type: 'delta' - unit: 'project' - volume: 1 - event_type: - - 'identity.project.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.created' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.updated' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.updated' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role.deleted' - type: 'delta' - unit: 'role' - volume: 1 - event_type: - - 'identity.role.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.role_assignment.created' - type: 'delta' - unit: 'role_assignment' - volume: 1 - event_type: - - 'identity.role_assignment.created' - resource_id: $.payload.role - user_id: $.payload.initiator.id - - - name: 
'identity.role_assignment.deleted' - type: 'delta' - unit: 'role_assignment' - volume: 1 - event_type: - - 'identity.role_assignment.deleted' - resource_id: $.payload.role - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.success' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.pending' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.authenticate.failure' - type: 'delta' - unit: 'user' - volume: 1 - event_type: - - 'identity.authenticate' - resource_id: $.payload.initiator.id - user_id: $.payload.initiator.id - - - name: 'identity.trust.created' - type: 'delta' - unit: 'trust' - volume: 1 - event_type: - - 'identity.OS-TRUST:trust.created' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'identity.trust.deleted' - type: 'delta' - unit: 'trust' - volume: 1 - event_type: - - 'identity.OS-TRUST:trust.deleted' - resource_id: $.payload.resource_info - user_id: $.payload.initiator.id - - - name: 'storage.api.request' - type: 'delta' - unit: 'request' - volume: 1 - event_type: - - 'objectstore.http.request' - resource_id: $.payload.target.id - user_id: $.payload.initiator.id - project_id: $.payload.initiator.project_id - - - name: '$.payload.name' - event_type: 'profiler.*' - type: 'gauge' - unit: 'trace' - volume: 1 - user_id: $.payload.user_id - project_id: $.payload.project_id - resource_id: '"profiler-" + $.payload.base_id' diff --git a/ceilometer/meter/notifications.py b/ceilometer/meter/notifications.py deleted file mode 100644 index efb57213..00000000 --- a/ceilometer/meter/notifications.py +++ /dev/null @@ -1,230 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools -import pkg_resources -import six - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import fnmatch -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import declarative -from ceilometer.i18n import _LE, _LW -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('meter_definitions_cfg_file', - default="meters.yaml", - help="Configuration file for defining meter notifications." 
- ), -] - -cfg.CONF.register_opts(OPTS, group='meter') -cfg.CONF.import_opt('disable_non_metric_meters', 'ceilometer.notification', - group='notification') - -LOG = log.getLogger(__name__) - - -class MeterDefinition(object): - - SAMPLE_ATTRIBUTES = ["name", "type", "volume", "unit", "timestamp", - "user_id", "project_id", "resource_id"] - - REQUIRED_FIELDS = ['name', 'type', 'event_type', 'unit', 'volume', - 'resource_id'] - - def __init__(self, definition_cfg, plugin_manager): - self.cfg = definition_cfg - missing = [field for field in self.REQUIRED_FIELDS - if not self.cfg.get(field)] - if missing: - raise declarative.MeterDefinitionException( - _LE("Required fields %s not specified") % missing, self.cfg) - - self._event_type = self.cfg.get('event_type') - if isinstance(self._event_type, six.string_types): - self._event_type = [self._event_type] - - if ('type' not in self.cfg.get('lookup', []) and - self.cfg['type'] not in sample.TYPES): - raise declarative.MeterDefinitionException( - _LE("Invalid type %s specified") % self.cfg['type'], self.cfg) - - self._fallback_user_id = declarative.Definition( - 'user_id', "_context_user_id|_context_user", plugin_manager) - self._fallback_project_id = declarative.Definition( - 'project_id', "_context_tenant_id|_context_tenant", plugin_manager) - self._attributes = {} - self._metadata_attributes = {} - - for name in self.SAMPLE_ATTRIBUTES: - attr_cfg = self.cfg.get(name) - if attr_cfg: - self._attributes[name] = declarative.Definition( - name, attr_cfg, plugin_manager) - metadata = self.cfg.get('metadata', {}) - for name in metadata: - self._metadata_attributes[name] = declarative.Definition( - name, metadata[name], plugin_manager) - - # List of fields we expected when multiple meter are in the payload - self.lookup = self.cfg.get('lookup') - if isinstance(self.lookup, six.string_types): - self.lookup = [self.lookup] - - def match_type(self, meter_name): - for t in self._event_type: - if fnmatch.fnmatch(meter_name, t): - return True - - def to_samples(self, message, all_values=False): - # Sample defaults - sample = { - 'name': self.cfg["name"], 'type': self.cfg["type"], - 'unit': self.cfg["unit"], 'volume': None, 'timestamp': None, - 'user_id': self._fallback_user_id.parse(message), - 'project_id': self._fallback_project_id.parse(message), - 'resource_id': None, 'message': message, 'metadata': {}, - } - for name, parser in self._metadata_attributes.items(): - value = parser.parse(message) - if value: - sample['metadata'][name] = value - - # NOTE(sileht): We expect multiple samples in the payload - # so put each attribute into a list - if self.lookup: - for name in sample: - sample[name] = [sample[name]] - - for name in self.SAMPLE_ATTRIBUTES: - parser = self._attributes.get(name) - if parser is not None: - value = parser.parse(message, bool(self.lookup)) - # NOTE(sileht): If we expect multiple samples - # some attributes are overridden even we don't get any - # result. 
Also note in this case value is always a list - if ((not self.lookup and value is not None) or - (self.lookup and ((name in self.lookup + ["name"]) - or value))): - sample[name] = value - - if self.lookup: - nb_samples = len(sample['name']) - # skip if no meters in payload - if nb_samples <= 0: - raise StopIteration - - attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"] - - samples_values = [] - for name in attributes: - values = sample.get(name) - nb_values = len(values) - if nb_values == nb_samples: - samples_values.append(values) - elif nb_values == 1 and name not in self.lookup: - samples_values.append(itertools.cycle(values)) - else: - nb = (0 if nb_values == 1 and values[0] is None - else nb_values) - LOG.warning('Only %(nb)d fetched meters contain ' - '"%(name)s" field instead of %(total)d.' % - dict(name=name, nb=nb, - total=nb_samples)) - raise StopIteration - - # NOTE(sileht): Transform the sample with multiple values per - # attribute into multiple samples with one value per attribute. - for values in zip(*samples_values): - yield dict((attributes[idx], value) - for idx, value in enumerate(values)) - else: - yield sample - - -class ProcessMeterNotifications(plugin_base.NotificationBase): - - event_types = [] - - def __init__(self, manager): - super(ProcessMeterNotifications, self).__init__(manager) - self.definitions = self._load_definitions() - - @staticmethod - def _load_definitions(): - plugin_manager = extension.ExtensionManager( - namespace='ceilometer.event.trait_plugin') - meters_cfg = declarative.load_definitions( - {}, cfg.CONF.meter.meter_definitions_cfg_file, - pkg_resources.resource_filename(__name__, "data/meters.yaml")) - - definitions = {} - for meter_cfg in reversed(meters_cfg['metric']): - if meter_cfg.get('name') in definitions: - # skip duplicate meters - LOG.warning(_LW("Skipping duplicate meter definition %s") - % meter_cfg) - continue - if (meter_cfg.get('volume') != 1 - or not cfg.CONF.notification.disable_non_metric_meters): - try: - md = MeterDefinition(meter_cfg, plugin_manager) - except declarative.DefinitionException as e: - errmsg = _LE("Error loading meter definition: %s") - LOG.error(errmsg, six.text_type(e)) - else: - definitions[meter_cfg['name']] = md - return definitions.values() - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - It is defining the exchange and topics to be connected for this plugin. - :param conf: Configuration. 
- #TODO(prad): This should be defined in the notification agent - """ - targets = [] - exchanges = [ - conf.nova_control_exchange, - conf.cinder_control_exchange, - conf.glance_control_exchange, - conf.neutron_control_exchange, - conf.heat_control_exchange, - conf.keystone_control_exchange, - conf.sahara_control_exchange, - conf.trove_control_exchange, - conf.zaqar_control_exchange, - conf.swift_control_exchange, - conf.ceilometer_control_exchange, - conf.magnum_control_exchange, - conf.dns_control_exchange, - ] - - for exchange in exchanges: - targets.extend(oslo_messaging.Target(topic=topic, - exchange=exchange) - for topic in - self.get_notification_topics(conf)) - return targets - - def process_notification(self, notification_body): - for d in self.definitions: - if d.match_type(notification_body['event_type']): - for s in d.to_samples(notification_body): - yield sample.Sample.from_notification(**s) diff --git a/ceilometer/middleware.py b/ceilometer/middleware.py deleted file mode 100644 index 3901c3e2..00000000 --- a/ceilometer/middleware.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -cfg.CONF.import_opt('nova_control_exchange', - 'ceilometer.compute.notifications') -cfg.CONF.import_opt('glance_control_exchange', - 'ceilometer.notification') -cfg.CONF.import_opt('neutron_control_exchange', - 'ceilometer.network.notifications') -cfg.CONF.import_opt('cinder_control_exchange', - 'ceilometer.notification') - -OPTS = [ - cfg.MultiStrOpt('http_control_exchanges', - default=[cfg.CONF.nova_control_exchange, - cfg.CONF.glance_control_exchange, - cfg.CONF.neutron_control_exchange, - cfg.CONF.cinder_control_exchange], - help="Exchanges name to listen for notifications."), -] - -cfg.CONF.register_opts(OPTS) - - -class HTTPRequest(plugin_base.NotificationBase, - plugin_base.NonMetricNotificationBase): - event_types = ['http.request'] - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. 
- """ - return [oslo_messaging.Target(topic=topic, exchange=exchange) - for topic in self.get_notification_topics(conf) - for exchange in conf.http_control_exchanges] - - def process_notification(self, message): - yield sample.Sample.from_notification( - name=message['event_type'], - type=sample.TYPE_DELTA, - volume=1, - unit=message['event_type'].split('.')[1], - user_id=message['payload']['request'].get('HTTP_X_USER_ID'), - project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), - resource_id=message['payload']['request'].get( - 'HTTP_X_SERVICE_NAME'), - message=message) - - -class HTTPResponse(HTTPRequest): - event_types = ['http.response'] diff --git a/ceilometer/network/__init__.py b/ceilometer/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/floatingip.py b/ceilometer/network/floatingip.py deleted file mode 100644 index ce178704..00000000 --- a/ceilometer/network/floatingip.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2016 Sungard Availability Services -# Copyright 2016 Red Hat -# Copyright 2012 eNovance -# Copyright 2013 IBM Corp -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_log import log - -from ceilometer.i18n import _LW -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - -cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') - - -class FloatingIPPollster(base.BaseServicesPollster): - - FIELDS = ['router_id', - 'status', - 'floating_network_id', - 'fixed_ip_address', - 'port_id', - 'floating_ip_address', - ] - - @property - def default_discovery(self): - return 'fip_services' - - def get_samples(self, manager, cache, resources): - - for fip in resources or []: - if fip['status'] is None: - LOG.warning(_LW("Invalid status, skipping IP address %s") % - fip['floating_ip_address']) - continue - status = self.get_status_id(fip['status']) - yield sample.Sample( - name='ip.floating', - type=sample.TYPE_GAUGE, - unit='ip', - volume=status, - user_id=fip.get('user_id'), - project_id=fip['tenant_id'], - resource_id=fip['id'], - resource_metadata=self.extract_metadata(fip) - ) diff --git a/ceilometer/network/notifications.py b/ceilometer/network/notifications.py deleted file mode 100644 index 6a196abe..00000000 --- a/ceilometer/network/notifications.py +++ /dev/null @@ -1,258 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Handler for producing network counter messages from Neutron notification - events. - -""" - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('neutron_control_exchange', - default='neutron', - help="Exchange name for Neutron notifications."), -] - -cfg.CONF.register_opts(OPTS) - - -class NetworkNotificationBase(plugin_base.NotificationBase): - - resource_name = None - - @property - def event_types(self): - return [ - # NOTE(flwang): When the *.create.start notification sending, - # there is no resource id assigned by Neutron yet. So we ignore - # the *.create.start notification for now and only listen the - # *.create.end to make sure the resource id is existed. - '%s.create.end' % self.resource_name, - '%s.update.*' % self.resource_name, - '%s.exists' % self.resource_name, - # FIXME(dhellmann): Neutron delete notifications do - # not include the same metadata as the other messages, - # so we ignore them for now. This isn't ideal, since - # it may mean we miss charging for some amount of time, - # but it is better than throwing away the existing - # metadata for a resource when it is deleted. - # '%s.delete.start' % (self.resource_name), - ] - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - This sequence is defining the exchange and topics to be connected for - this plugin. - """ - return [oslo_messaging.Target(topic=topic, - exchange=conf.neutron_control_exchange) - for topic in self.get_notification_topics(conf)] - - def process_notification(self, message): - counter_name = getattr(self, 'counter_name', self.resource_name) - unit_value = getattr(self, 'unit', self.resource_name) - - resource = message['payload'].get(self.resource_name) - if resource: - # NOTE(liusheng): In %s.update.start notifications, the id is in - # message['payload'] instead of resource itself. - if message['event_type'].endswith('update.start'): - resource['id'] = message['payload']['id'] - resources = [resource] - else: - resources = message['payload'].get(self.resource_name + 's', []) - - resource_message = message.copy() - for resource in resources: - resource_message['payload'] = resource - yield sample.Sample.from_notification( - name=counter_name, - type=sample.TYPE_GAUGE, - unit=unit_value, - volume=1, - user_id=resource_message['_context_user_id'], - project_id=resource_message['_context_tenant_id'], - resource_id=resource['id'], - message=resource_message) - event_type_split = resource_message['event_type'].split('.') - if len(event_type_split) > 2: - yield sample.Sample.from_notification( - name=counter_name - + "." + event_type_split[1], - type=sample.TYPE_DELTA, - unit=unit_value, - volume=1, - user_id=resource_message['_context_user_id'], - project_id=resource_message['_context_tenant_id'], - resource_id=resource['id'], - message=resource_message) - - -class Network(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron network notifications. - - Handle network.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'network' - - -class Subnet(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle subnet.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'subnet' - - -class Port(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. 
- - Handle port.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'port' - - -class Router(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle router.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'router' - - -class FloatingIP(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle floatingip.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'floatingip' - counter_name = 'ip.floating' - unit = 'ip' - - -class Pool(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle pool.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'pool' - counter_name = 'network.services.lb.pool' - - -class Vip(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle vip.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'vip' - counter_name = 'network.services.lb.vip' - - -class Member(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle member.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'member' - counter_name = 'network.services.lb.member' - - -class HealthMonitor(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle health_monitor.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'health_monitor' - counter_name = 'network.services.lb.health_monitor' - - -class Firewall(NetworkNotificationBase, plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'firewall' - counter_name = 'network.services.firewall' - - -class FirewallPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall_policy.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'firewall_policy' - counter_name = 'network.services.firewall.policy' - - -class FirewallRule(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle firewall_rule.{create.end|update.*|exists} notifications - from neutron. - """ - resource_name = 'firewall_rule' - counter_name = 'network.services.firewall.rule' - - -class VPNService(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle vpnservice.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'vpnservice' - counter_name = 'network.services.vpn' - - -class IPSecPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle pool.{create.end|update.*|exists} notifications from neutron. - """ - resource_name = 'ipsecpolicy' - counter_name = 'network.services.vpn.ipsecpolicy' - - -class IKEPolicy(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle ikepolicy.{create.end|update.*|exists} notifications from neutron. 
- """ - resource_name = 'ikepolicy' - counter_name = 'network.services.vpn.ikepolicy' - - -class IPSecSiteConnection(NetworkNotificationBase, - plugin_base.NonMetricNotificationBase): - """Listen for Neutron notifications. - - Handle ipsec_site_connection.{create.end|update.*|exists} - notifications from neutron. - """ - resource_name = 'ipsec_site_connection' - counter_name = 'network.services.vpn.connections' diff --git a/ceilometer/network/services/__init__.py b/ceilometer/network/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/services/base.py b/ceilometer/network/services/base.py deleted file mode 100644 index 4aa666bd..00000000 --- a/ceilometer/network/services/base.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer.agent import plugin_base - - -# status map for converting metric status to volume int -STATUS = { - 'inactive': 0, - 'active': 1, - 'pending_create': 2, - 'down': 3, - 'created': 4, - 'pending_update': 5, - 'pending_delete': 6, - 'error': 7, -} - - -class BaseServicesPollster(plugin_base.PollsterBase): - - FIELDS = [] - - @staticmethod - def _iter_cache(cache, meter_name, method): - if meter_name not in cache: - cache[meter_name] = list(method()) - return iter(cache[meter_name]) - - def extract_metadata(self, metric): - return dict((k, metric[k]) for k in self.FIELDS) - - @staticmethod - def get_status_id(value): - status = value.lower() - return STATUS.get(status, -1) diff --git a/ceilometer/network/services/discovery.py b/ceilometer/network/services/discovery.py deleted file mode 100644 index f20af9d4..00000000 --- a/ceilometer/network/services/discovery.py +++ /dev/null @@ -1,118 +0,0 @@ -# -# Copyright (c) 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from ceilometer.agent import plugin_base -from ceilometer import neutron_client - - -class _BaseServicesDiscovery(plugin_base.DiscoveryBase): - KEYSTONE_REQUIRED_FOR_SERVICE = 'neutron' - - def __init__(self): - super(_BaseServicesDiscovery, self).__init__() - self.neutron_cli = neutron_client.Client() - - -class LBPoolsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - pools = self.neutron_cli.pool_get_all() - return [i for i in pools - if i.get('status') != 'error'] - - -class LBVipsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - vips = self.neutron_cli.vip_get_all() - return [i for i in vips - if i.get('status', None) != 'error'] - - -class LBMembersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - members = self.neutron_cli.member_get_all() - return [i for i in members - if i.get('status', None) != 'error'] - - -class LBListenersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover load balancer listener resources to monitor.""" - - listeners = self.neutron_cli.list_listener() - return [i for i in listeners - if i.get('operating_status', None) != 'error'] - - -class LBLoadBalancersDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover load balancer resources to monitor.""" - - loadbalancers = self.neutron_cli.list_loadbalancer() - return [i for i in loadbalancers - if i.get('operating_status', None) != 'error'] - - -class LBHealthMonitorsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - probes = self.neutron_cli.health_monitor_get_all() - return probes - - -class VPNServicesDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - vpnservices = self.neutron_cli.vpn_get_all() - return [i for i in vpnservices - if i.get('status', None) != 'error'] - - -class IPSecConnectionsDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - conns = self.neutron_cli.ipsec_site_connections_get_all() - return conns - - -class FirewallDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - fw = self.neutron_cli.firewall_get_all() - return [i for i in fw - if i.get('status', None) != 'error'] - - -class FirewallPolicyDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover resources to monitor.""" - - return self.neutron_cli.fw_policy_get_all() - - -class FloatingIPDiscovery(_BaseServicesDiscovery): - def discover(self, manager, param=None): - """Discover floating IP resources to monitor.""" - - return self.neutron_cli.fip_get_all() diff --git a/ceilometer/network/services/fwaas.py b/ceilometer/network/services/fwaas.py deleted file mode 100644 index 7b827613..00000000 --- a/ceilometer/network/services/fwaas.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class FirewallPollster(base.BaseServicesPollster): - """Pollster to capture firewalls status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'name', - 'status', - 'firewall_policy_id', - ] - - @property - def default_discovery(self): - return 'fw_services' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for fw in resources: - LOG.debug("Firewall : %s" % fw) - status = self.get_status_id(fw['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on fw %(id)s," - "skipping sample") % {'stat': fw['status'], - 'id': fw['id']}) - continue - - yield sample.Sample( - name='network.services.firewall', - type=sample.TYPE_GAUGE, - unit='firewall', - volume=status, - user_id=None, - project_id=fw['tenant_id'], - resource_id=fw['id'], - resource_metadata=self.extract_metadata(fw) - ) - - -class FirewallPolicyPollster(base.BaseServicesPollster): - """Pollster to capture firewall policy samples.""" - - FIELDS = ['name', - 'description', - 'name', - 'firewall_rules', - 'shared', - 'audited', - ] - - @property - def default_discovery(self): - return 'fw_policy' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for fw in resources: - LOG.debug("Firewall Policy: %s" % fw) - - yield sample.Sample( - name='network.services.firewall.policy', - type=sample.TYPE_GAUGE, - unit='firewall_policy', - volume=1, - user_id=None, - project_id=fw['tenant_id'], - resource_id=fw['id'], - resource_metadata=self.extract_metadata(fw) - ) diff --git a/ceilometer/network/services/lbaas.py b/ceilometer/network/services/lbaas.py deleted file mode 100644 index 5f33d22c..00000000 --- a/ceilometer/network/services/lbaas.py +++ /dev/null @@ -1,464 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
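Editor's note on the discovery.py deletion above: its classes all follow one pattern, i.e. fetch a resource listing from the Neutron client, then drop entries whose status (or operating_status) is 'error' so pollsters never emit samples for them. A generic sketch of that filter, with `list_fn` standing in for any of the neutron_client getters (a hypothetical stand-in, not an actual client method):

    # Generic form of the filtering used by the deleted
    # ceilometer/network/services/discovery.py classes.
    def discover_non_error(list_fn, status_field='status'):
        """Return resources whose status field is not 'error'."""
        return [r for r in list_fn() if r.get(status_field) != 'error']

    # Usage with a stubbed listing (the real code called e.g. pool_get_all()):
    pools = discover_non_error(
        lambda: [{'id': 'a', 'status': 'ACTIVE'},
                 {'id': 'b', 'status': 'error'}])
    assert [p['id'] for p in pools] == ['a']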
- -import abc -import collections - -from oslo_config import cfg -from oslo_log import log -import six - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import neutron_client -from ceilometer import sample - -LOG = log.getLogger(__name__) - -LBStatsData = collections.namedtuple( - 'LBStats', - ['active_connections', 'total_connections', 'bytes_in', 'bytes_out'] -) - -LOAD_BALANCER_STATUS_V2 = { - 'offline': 0, - 'online': 1, - 'no_monitor': 3, - 'error': 4, - 'degraded': 5 -} - - -class BaseLBPollster(base.BaseServicesPollster): - """Base Class for Load Balancer pollster""" - - def __init__(self): - super(BaseLBPollster, self).__init__() - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - def get_load_balancer_status_id(self, value): - if self.lb_version == 'v1': - resource_status = self.get_status_id(value) - elif self.lb_version == 'v2': - status = value.lower() - resource_status = LOAD_BALANCER_STATUS_V2.get(status, -1) - return resource_status - - -class LBPoolPollster(BaseLBPollster): - """Pollster to capture Load Balancer pool status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'lb_method', - 'name', - 'protocol', - 'provider', - 'status', - 'status_description', - 'subnet_id', - 'vip_id' - ] - - @property - def default_discovery(self): - return 'lb_pools' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for pool in resources: - LOG.debug("Load Balancer Pool : %s" % pool) - status = self.get_load_balancer_status_id(pool['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on pool " - "%(id)s, skipping sample") - % {'stat': pool['status'], 'id': pool['id']}) - continue - - yield sample.Sample( - name='network.services.lb.pool', - type=sample.TYPE_GAUGE, - unit='pool', - volume=status, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=self.extract_metadata(pool) - ) - - -class LBVipPollster(base.BaseServicesPollster): - """Pollster to capture Load Balancer Vip status samples.""" - - FIELDS = ['admin_state_up', - 'address', - 'connection_limit', - 'description', - 'name', - 'pool_id', - 'port_id', - 'protocol', - 'protocol_port', - 'status', - 'status_description', - 'subnet_id', - 'session_persistence', - ] - - @property - def default_discovery(self): - return 'lb_vips' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for vip in resources: - LOG.debug("Load Balancer Vip : %s" % vip) - status = self.get_status_id(vip['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on vip " - "%(id)s, skipping sample") - % {'stat': vip['status'], 'id': vip['id']}) - continue - - yield sample.Sample( - name='network.services.lb.vip', - type=sample.TYPE_GAUGE, - unit='vip', - volume=status, - user_id=None, - project_id=vip['tenant_id'], - resource_id=vip['id'], - resource_metadata=self.extract_metadata(vip) - ) - - -class LBMemberPollster(BaseLBPollster): - """Pollster to capture Load Balancer Member status samples.""" - - FIELDS = ['admin_state_up', - 'address', - 'pool_id', - 'protocol_port', - 'status', - 'status_description', - 'weight', - ] - - @property - def default_discovery(self): - return 'lb_members' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for member in resources: - LOG.debug("Load Balancer Member : %s" % member) - 
status = self.get_load_balancer_status_id(member['status']) - if status == -1: - LOG.warning(_("Unknown status %(stat)s received on member " - "%(id)s, skipping sample") - % {'stat': member['status'], 'id': member['id']}) - continue - yield sample.Sample( - name='network.services.lb.member', - type=sample.TYPE_GAUGE, - unit='member', - volume=status, - user_id=None, - project_id=member['tenant_id'], - resource_id=member['id'], - resource_metadata=self.extract_metadata(member) - ) - - -class LBHealthMonitorPollster(base.BaseServicesPollster): - """Pollster to capture Load Balancer Health probes status samples.""" - - FIELDS = ['admin_state_up', - 'delay', - 'max_retries', - 'pools', - 'timeout', - 'type' - ] - - @property - def default_discovery(self): - return 'lb_health_probes' - - def get_samples(self, manager, cache, resources): - for probe in resources: - LOG.debug("Load Balancer Health probe : %s" % probe) - yield sample.Sample( - name='network.services.lb.health_monitor', - type=sample.TYPE_GAUGE, - unit='health_monitor', - volume=1, - user_id=None, - project_id=probe['tenant_id'], - resource_id=probe['id'], - resource_metadata=self.extract_metadata(probe) - ) - - -@six.add_metaclass(abc.ABCMeta) -class _LBStatsPollster(base.BaseServicesPollster): - """Base Statistics pollster. - - It is capturing the statistics info and yielding samples for connections - and bandwidth. - """ - - def __init__(self): - super(_LBStatsPollster, self).__init__() - self.client = neutron_client.Client() - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - @staticmethod - def make_sample_from_pool(pool, name, type, unit, volume, - resource_metadata=None): - if not resource_metadata: - resource_metadata = {} - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=resource_metadata, - ) - - def _populate_stats_cache(self, pool_id, cache): - i_cache = cache.setdefault("lbstats", {}) - if pool_id not in i_cache: - stats = self.client.pool_stats(pool_id)['stats'] - i_cache[pool_id] = LBStatsData( - active_connections=stats['active_connections'], - total_connections=stats['total_connections'], - bytes_in=stats['bytes_in'], - bytes_out=stats['bytes_out'], - ) - return i_cache[pool_id] - - def _populate_stats_cache_v2(self, loadbalancer_id, cache): - i_cache = cache.setdefault("lbstats", {}) - if loadbalancer_id not in i_cache: - stats = self.client.get_loadbalancer_stats(loadbalancer_id) - i_cache[loadbalancer_id] = LBStatsData( - active_connections=stats['active_connections'], - total_connections=stats['total_connections'], - bytes_in=stats['bytes_in'], - bytes_out=stats['bytes_out'], - ) - return i_cache[loadbalancer_id] - - @property - def default_discovery(self): - discovery_resource = 'lb_pools' - if self.lb_version == 'v2': - discovery_resource = 'lb_loadbalancers' - return discovery_resource - - @abc.abstractmethod - def _get_sample(pool, c_data): - """Return one Sample.""" - - def get_samples(self, manager, cache, resources): - if self.lb_version == 'v1': - for pool in resources: - try: - c_data = self._populate_stats_cache(pool['id'], cache) - yield self._get_sample(pool, c_data) - except Exception: - LOG.exception(_('Ignoring pool %(pool_id)s'), - {'pool_id': pool['id']}) - elif self.lb_version == 'v2': - for loadbalancer in resources: - try: - c_data = self._populate_stats_cache_v2(loadbalancer['id'], - cache) - yield self._get_sample(loadbalancer, c_data) - except 
Exception: - LOG.exception( - _('Ignoring ' - 'loadbalancer %(loadbalancer_id)s'), - {'loadbalancer_id': loadbalancer['id']}) - - -class LBActiveConnectionsPollster(_LBStatsPollster): - """Pollster to capture Active Load Balancer connections.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.active.connections', - type=sample.TYPE_GAUGE, - unit='connection', - volume=data.active_connections, - ) - - -class LBTotalConnectionsPollster(_LBStatsPollster): - """Pollster to capture Total Load Balancer connections.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.total.connections', - type=sample.TYPE_CUMULATIVE, - unit='connection', - volume=data.total_connections, - ) - - -class LBBytesInPollster(_LBStatsPollster): - """Pollster to capture incoming bytes.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.incoming.bytes', - type=sample.TYPE_GAUGE, - unit='B', - volume=data.bytes_in, - ) - - -class LBBytesOutPollster(_LBStatsPollster): - """Pollster to capture outgoing bytes.""" - - @staticmethod - def _get_sample(pool, data): - return make_sample_from_pool( - pool, - name='network.services.lb.outgoing.bytes', - type=sample.TYPE_GAUGE, - unit='B', - volume=data.bytes_out, - ) - - -def make_sample_from_pool(pool, name, type, unit, volume, - resource_metadata=None): - resource_metadata = resource_metadata or {} - - return sample.Sample( - name=name, - type=type, - unit=unit, - volume=volume, - user_id=None, - project_id=pool['tenant_id'], - resource_id=pool['id'], - resource_metadata=resource_metadata, - ) - - -class LBListenerPollster(BaseLBPollster): - """Pollster to capture Load Balancer Listener status samples.""" - - FIELDS = ['admin_state_up', - 'connection_limit', - 'description', - 'name', - 'default_pool_id', - 'protocol', - 'protocol_port', - 'operating_status', - 'loadbalancers' - ] - - @property - def default_discovery(self): - return 'lb_listeners' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for listener in resources: - LOG.debug("Load Balancer Listener : %s" % listener) - status = self.get_load_balancer_status_id( - listener['operating_status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on listener " - "%(id)s, skipping sample") - % {'stat': listener['operating_status'], - 'id': listener['id']}) - continue - - yield sample.Sample( - name='network.services.lb.listener', - type=sample.TYPE_GAUGE, - unit='listener', - volume=status, - user_id=None, - project_id=listener['tenant_id'], - resource_id=listener['id'], - resource_metadata=self.extract_metadata(listener) - ) - - -class LBLoadBalancerPollster(BaseLBPollster): - """Pollster to capture Load Balancer status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'vip_address', - 'listeners', - 'name', - 'vip_subnet_id', - 'operating_status', - ] - - @property - def default_discovery(self): - return 'lb_loadbalancers' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for loadbalancer in resources: - LOG.debug("Load Balancer: %s" % loadbalancer) - status = self.get_load_balancer_status_id( - loadbalancer['operating_status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received " - "on Load Balancer " - "%(id)s, skipping 
sample") - % {'stat': loadbalancer['operating_status'], - 'id': loadbalancer['id']}) - continue - - yield sample.Sample( - name='network.services.lb.loadbalancer', - type=sample.TYPE_GAUGE, - unit='loadbalancer', - volume=status, - user_id=None, - project_id=loadbalancer['tenant_id'], - resource_id=loadbalancer['id'], - resource_metadata=self.extract_metadata(loadbalancer) - ) diff --git a/ceilometer/network/services/vpnaas.py b/ceilometer/network/services/vpnaas.py deleted file mode 100644 index 948613d4..00000000 --- a/ceilometer/network/services/vpnaas.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log - -from ceilometer.i18n import _ -from ceilometer.network.services import base -from ceilometer import sample - -LOG = log.getLogger(__name__) - - -class VPNServicesPollster(base.BaseServicesPollster): - """Pollster to capture VPN status samples.""" - - FIELDS = ['admin_state_up', - 'description', - 'name', - 'status', - 'subnet_id', - 'router_id' - ] - - @property - def default_discovery(self): - return 'vpn_services' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for vpn in resources: - LOG.debug("VPN : %s" % vpn) - status = self.get_status_id(vpn['status']) - if status == -1: - # unknown status, skip this sample - LOG.warning(_("Unknown status %(stat)s received on vpn " - "%(id)s, skipping sample") - % {'stat': vpn['status'], 'id': vpn['id']}) - continue - - yield sample.Sample( - name='network.services.vpn', - type=sample.TYPE_GAUGE, - unit='vpnservice', - volume=status, - user_id=None, - project_id=vpn['tenant_id'], - resource_id=vpn['id'], - resource_metadata=self.extract_metadata(vpn) - ) - - -class IPSecConnectionsPollster(base.BaseServicesPollster): - """Pollster to capture vpn ipsec connections status samples.""" - - FIELDS = ['name', - 'description', - 'peer_address', - 'peer_id', - 'peer_cidrs', - 'psk', - 'initiator', - 'ikepolicy_id', - 'dpd', - 'ipsecpolicy_id', - 'vpnservice_id', - 'mtu', - 'admin_state_up', - 'status', - 'tenant_id' - ] - - @property - def default_discovery(self): - return 'ipsec_connections' - - def get_samples(self, manager, cache, resources): - resources = resources or [] - - for conn in resources: - LOG.debug("IPSec Connection Info: %s" % conn) - - yield sample.Sample( - name='network.services.vpn.connections', - type=sample.TYPE_GAUGE, - unit='ipsec_site_connection', - volume=1, - user_id=None, - project_id=conn['tenant_id'], - resource_id=conn['id'], - resource_metadata=self.extract_metadata(conn) - ) diff --git a/ceilometer/network/statistics/__init__.py b/ceilometer/network/statistics/__init__.py deleted file mode 100644 index f45ed710..00000000 --- a/ceilometer/network/statistics/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import netutils -import six -from six.moves.urllib import parse as urlparse -from stevedore import driver as _driver - -from ceilometer.agent import plugin_base -from ceilometer import sample - - -@six.add_metaclass(abc.ABCMeta) -class _Base(plugin_base.PollsterBase): - - NAMESPACE = 'network.statistics.drivers' - drivers = {} - - @property - def default_discovery(self): - # this signifies that the pollster gets its resources from - # elsewhere, in this case they're manually listed in the - # pipeline configuration - return None - - @abc.abstractproperty - def meter_name(self): - """Return a Meter Name.""" - - @abc.abstractproperty - def meter_type(self): - """Return a Meter Type.""" - - @abc.abstractproperty - def meter_unit(self): - """Return a Meter Unit.""" - - @staticmethod - def _parse_my_resource(resource): - - parse_url = netutils.urlsplit(resource) - - params = urlparse.parse_qs(parse_url.query) - parts = urlparse.ParseResult(parse_url.scheme, - parse_url.netloc, - parse_url.path, - None, - None, - None) - return parts, params - - @staticmethod - def get_driver(scheme): - if scheme not in _Base.drivers: - _Base.drivers[scheme] = _driver.DriverManager(_Base.NAMESPACE, - scheme).driver() - return _Base.drivers[scheme] - - def get_samples(self, manager, cache, resources): - resources = resources or [] - for resource in resources: - parse_url, params = self._parse_my_resource(resource) - ext = self.get_driver(parse_url.scheme) - sample_data = ext.get_sample_data(self.meter_name, - parse_url, - params, - cache) - - for data in sample_data or []: - if data is None: - continue - if not isinstance(data, list): - data = [data] - for (volume, resource_id, - resource_metadata) in data: - - yield sample.Sample( - name=self.meter_name, - type=self.meter_type, - unit=self.meter_unit, - volume=volume, - user_id=None, - project_id=None, - resource_id=resource_id, - resource_metadata=resource_metadata - ) diff --git a/ceilometer/network/statistics/driver.py b/ceilometer/network/statistics/driver.py deleted file mode 100644 index 0eb82550..00000000 --- a/ceilometer/network/statistics/driver.py +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
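The _Base pollster above is driven entirely by resource URLs listed in the pipeline configuration: the URL scheme selects a stevedore driver and the query string carries driver options. A dependency-free approximation of its _parse_my_resource step, using only the standard library (the removed code went through oslo.utils netutils):

    from urllib.parse import SplitResult, parse_qs, urlsplit

    def parse_resource(resource):
        # Split off the query string: the remaining parts identify the
        # endpoint, the parameters configure the driver.
        parts = urlsplit(resource)
        params = parse_qs(parts.query)
        base = SplitResult(parts.scheme, parts.netloc, parts.path, None, None)
        return base, params

    base, params = parse_resource(
        'opendaylight://127.0.0.1:8080/controller/nb/v2'
        '?container_name=default&auth=basic')
    print(base.scheme)               # 'opendaylight', used to pick the driver
    print(params['container_name'])  # ['default']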
- -import abc - -import six - - -@six.add_metaclass(abc.ABCMeta) -class Driver(object): - - @abc.abstractmethod - def get_sample_data(self, meter_name, parse_url, params, cache): - """Return volume, resource_id, resource_metadata, timestamp in tuple. - - If not implemented for meter_name, returns None - """ diff --git a/ceilometer/network/statistics/flow.py b/ceilometer/network/statistics/flow.py deleted file mode 100644 index b23b6424..00000000 --- a/ceilometer/network/statistics/flow.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class FlowPollster(statistics._Base): - - meter_name = 'switch.flow' - meter_type = sample.TYPE_GAUGE - meter_unit = 'flow' - - -class FlowPollsterDurationSeconds(statistics._Base): - - meter_name = 'switch.flow.duration_seconds' - meter_type = sample.TYPE_GAUGE - meter_unit = 's' - - -class FlowPollsterDurationNanoseconds(statistics._Base): - - meter_name = 'switch.flow.duration_nanoseconds' - meter_type = sample.TYPE_GAUGE - meter_unit = 'ns' - - -class FlowPollsterPackets(statistics._Base): - - meter_name = 'switch.flow.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class FlowPollsterBytes(statistics._Base): - - meter_name = 'switch.flow.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' diff --git a/ceilometer/network/statistics/opencontrail/__init__.py b/ceilometer/network/statistics/opencontrail/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/statistics/opencontrail/client.py b/ceilometer/network/statistics/opencontrail/client.py deleted file mode 100644 index a85512ff..00000000 --- a/ceilometer/network/statistics/opencontrail/client.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
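The Driver contract above is a single method. One detail worth noting: the docstring mentions a timestamp, but the consuming loop in statistics/__init__.py unpacks three-element tuples, so the concrete drivers in this tree return (volume, resource_id, resource_metadata). A hypothetical minimal driver against that contract, for illustration only:

    class StaticDriver(object):
        """Hypothetical driver serving canned values; not part of this tree."""

        _DATA = {'switch.flow': [(12, 'switch-1', {'table_id': '0'})]}

        def get_sample_data(self, meter_name, parse_url, params, cache):
            # Returning None means "this meter is not implemented here";
            # the calling pollster then yields no samples for it.
            return self._DATA.get(meter_name)

    driver = StaticDriver()
    print(driver.get_sample_data('switch.flow', None, {}, {}))
    # [(12, 'switch-1', {'table_id': '0'})]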
- -import copy - -from oslo_config import cfg -from oslo_log import log -import requests -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ - - -CONF = cfg.CONF -CONF.import_opt('http_timeout', 'ceilometer.service') - - -LOG = log.getLogger(__name__) - - -class OpencontrailAPIFailed(Exception): - pass - - -class AnalyticsAPIBaseClient(object): - """Opencontrail Base Statistics REST API Client.""" - - def __init__(self, endpoint, data): - self.endpoint = endpoint - self.data = data or {} - - def request(self, path, fqdn_uuid, data=None): - req_data = copy.copy(self.data) - if data: - req_data.update(data) - - req_params = self._get_req_params(data=req_data) - - url = urlparse.urljoin(self.endpoint, path + fqdn_uuid) - self._log_req(url, req_params) - resp = requests.get(url, **req_params) - self._log_res(resp) - - if resp.status_code != 200: - raise OpencontrailAPIFailed( - _('Opencontrail API returned %(status)s %(reason)s') % - {'status': resp.status_code, 'reason': resp.reason}) - - return resp - - def _get_req_params(self, data=None): - req_params = { - 'headers': { - 'Accept': 'application/json' - }, - 'data': data, - 'allow_redirects': False, - 'timeout': CONF.http_timeout, - } - - return req_params - - @staticmethod - def _log_req(url, req_params): - if not CONF.debug: - return - - curl_command = ['REQ: curl -i -X GET '] - - params = [] - for name, value in six.iteritems(req_params['data']): - params.append("%s=%s" % (name, value)) - - curl_command.append('"%s?%s" ' % (url, '&'.join(params))) - - for name, value in six.iteritems(req_params['headers']): - curl_command.append('-H "%s: %s" ' % (name, value)) - - LOG.debug(''.join(curl_command)) - - @staticmethod - def _log_res(resp): - if not CONF.debug: - return - - dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, - resp.status_code, - resp.reason)] - dump.extend('%s: %s\n' % (k, v) - for k, v in six.iteritems(resp.headers)) - dump.append('\n') - if resp.content: - dump.extend([resp.content, '\n']) - - LOG.debug(''.join(dump)) - - -class NetworksAPIClient(AnalyticsAPIBaseClient): - """Opencontrail Statistics REST API Client.""" - - def get_vm_statistics(self, fqdn_uuid, data=None): - """Get statistics of a virtual-machines. - - URL: - {endpoint}/analytics/uves/virtual-machine/{fqdn_uuid} - """ - - path = '/analytics/uves/virtual-machine/' - resp = self.request(path, fqdn_uuid, data) - - return resp.json() - - -class Client(object): - - def __init__(self, endpoint, data=None): - self.networks = NetworksAPIClient(endpoint, data) diff --git a/ceilometer/network/statistics/opencontrail/driver.py b/ceilometer/network/statistics/opencontrail/driver.py deleted file mode 100644 index 5cb32dee..00000000 --- a/ceilometer/network/statistics/opencontrail/driver.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
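Prior to this removal, the Opencontrail analytics client above was exercised roughly as follows; the endpoint URL is illustrative, and get_vm_statistics issues a GET against /analytics/uves/virtual-machine/{fqdn_uuid} and returns the decoded JSON body:

    from ceilometer.network.statistics.opencontrail import client

    c = client.Client('http://127.0.0.1:8081')
    stats = c.networks.get_vm_statistics('*')
    for value in stats.get('value', []):
        # The driver drilled into value['value']['UveVirtualMachineAgent']
        # for per-interface and floating-ip counters.
        print(sorted(value['value'].get('UveVirtualMachineAgent', {})))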
- -import re - -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics import driver -from ceilometer.network.statistics.opencontrail import client -from ceilometer import neutron_client - - -class OpencontrailDriver(driver.Driver): - """Driver of network analytics of Opencontrail. - - This driver uses resources in "pipeline.yaml". - - Resource requires below conditions: - - * resource is url - * scheme is "opencontrail" - - This driver can be configured via query parameters. - Supported parameters: - - * scheme: - The scheme of request url to Opencontrail Analytics endpoint. - (default "http") - * virtual_network - Specify the virtual network. - (default None) - * fqdn_uuid: - Specify the VM fqdn UUID. - (default "*") - * resource: - The resource on which the counters are retrieved. - (default "if_stats_list") - - * fip_stats_list: - Traffic on floating ips - * if_stats_list: - Traffic on VM interfaces - - e.g.:: - - opencontrail://localhost:8081/?resource=fip_stats_list& - virtual_network=default-domain:openstack:public - """ - @staticmethod - def _prepare_cache(endpoint, params, cache): - - if 'network.statistics.opencontrail' in cache: - return cache['network.statistics.opencontrail'] - - data = { - 'o_client': client.Client(endpoint), - 'n_client': neutron_client.Client() - } - - cache['network.statistics.opencontrail'] = data - - return data - - def get_sample_data(self, meter_name, parse_url, params, cache): - - parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], - parse_url.netloc, - parse_url.path, - None, - None, - None) - endpoint = urlparse.urlunparse(parts) - - iter = self._get_iter(meter_name) - if iter is None: - # The extractor for this meter is not implemented or the API - # doesn't have method to get this meter. - return - - extractor = self._get_extractor(meter_name) - if extractor is None: - # The extractor for this meter is not implemented or the API - # doesn't have method to get this meter. 
- return - - data = self._prepare_cache(endpoint, params, cache) - - ports = data['n_client'].port_get_all() - ports_map = dict((port['id'], port) for port in ports) - - resource = params.get('resource', ['if_stats_list'])[0] - fqdn_uuid = params.get('fqdn_uuid', ['*'])[0] - virtual_network = params.get('virtual_network', [None])[0] - - statistics = data['o_client'].networks.get_vm_statistics(fqdn_uuid) - if not statistics: - return - - for value in statistics['value']: - for sample in iter(extractor, value, ports_map, - resource, virtual_network): - if sample is not None: - yield sample - - def _get_iter(self, meter_name): - if meter_name.startswith('switch.port'): - return self._iter_port - - def _get_extractor(self, meter_name): - method_name = '_' + meter_name.replace('.', '_') - return getattr(self, method_name, None) - - @staticmethod - def _explode_name(fq_name): - m = re.match( - "(?P[^:]+):(?P.+):(?P[^:]+)", - fq_name) - if not m: - return - return m.group('domain'), m.group('project'), m.group('port_id') - - @staticmethod - def _get_resource_meta(ports_map, stat, resource, network): - if resource == 'fip_stats_list': - if network and (network != stat['virtual_network']): - return - name = stat['iface_name'] - else: - name = stat['name'] - - domain, project, port_id = OpencontrailDriver._explode_name(name) - port = ports_map.get(port_id) - - tenant_id = None - network_id = None - device_owner_id = None - - if port: - tenant_id = port['tenant_id'] - network_id = port['network_id'] - device_owner_id = port['device_id'] - - resource_meta = {'device_owner_id': device_owner_id, - 'network_id': network_id, - 'project_id': tenant_id, - 'project': project, - 'resource': resource, - 'domain': domain} - - return port_id, resource_meta - - @staticmethod - def _iter_port(extractor, value, ports_map, resource, - virtual_network=None): - stats = value['value']['UveVirtualMachineAgent'].get(resource, []) - for stat in stats: - if type(stat) is list: - for sub_stats, node in zip(*[iter(stat)] * 2): - for sub_stat in sub_stats: - result = OpencontrailDriver._get_resource_meta( - ports_map, sub_stat, resource, virtual_network) - if not result: - continue - port_id, resource_meta = result - yield extractor(sub_stat, port_id, resource_meta) - else: - result = OpencontrailDriver._get_resource_meta( - ports_map, stat, resource, virtual_network) - if not result: - continue - port_id, resource_meta = result - yield extractor(stat, port_id, resource_meta) - - @staticmethod - def _switch_port_receive_packets(statistic, resource_id, resource_meta): - return int(statistic['in_pkts']), resource_id, resource_meta - - @staticmethod - def _switch_port_transmit_packets(statistic, resource_id, resource_meta): - return int(statistic['out_pkts']), resource_id, resource_meta - - @staticmethod - def _switch_port_receive_bytes(statistic, resource_id, resource_meta): - return int(statistic['in_bytes']), resource_id, resource_meta - - @staticmethod - def _switch_port_transmit_bytes(statistic, resource_id, resource_meta): - return int(statistic['out_bytes']), resource_id, resource_meta diff --git a/ceilometer/network/statistics/opendaylight/__init__.py b/ceilometer/network/statistics/opendaylight/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/network/statistics/opendaylight/client.py b/ceilometer/network/statistics/opendaylight/client.py deleted file mode 100644 index 21c9298b..00000000 --- a/ceilometer/network/statistics/opendaylight/client.py +++ /dev/null @@ -1,240 +0,0 @@ -# -# 
Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_config import cfg -from oslo_log import log -import requests -from requests import auth -import six - -from ceilometer.i18n import _ - - -CONF = cfg.CONF -CONF.import_opt('http_timeout', 'ceilometer.service') - - -LOG = log.getLogger(__name__) - - -@six.add_metaclass(abc.ABCMeta) -class _Base(object): - """Base class of OpenDaylight REST APIs Clients.""" - - @abc.abstractproperty - def base_url(self): - """Returns base url for each REST API.""" - - def __init__(self, client): - self.client = client - - def request(self, path, container_name): - return self.client.request(self.base_url + path, container_name) - - -class OpenDaylightRESTAPIFailed(Exception): - pass - - -class StatisticsAPIClient(_Base): - """OpenDaylight Statistics REST API Client - - Base URL: - {endpoint}/statistics/{containerName} - """ - - base_url = '/statistics/%(container_name)s' - - def get_port_statistics(self, container_name): - """Get port statistics - - URL: - {Base URL}/port - """ - return self.request('/port', container_name) - - def get_flow_statistics(self, container_name): - """Get flow statistics - - URL: - {Base URL}/flow - """ - return self.request('/flow', container_name) - - def get_table_statistics(self, container_name): - """Get table statistics - - URL: - {Base URL}/table - """ - return self.request('/table', container_name) - - -class TopologyAPIClient(_Base): - """OpenDaylight Topology REST API Client - - Base URL: - {endpoint}/topology/{containerName} - """ - - base_url = '/topology/%(container_name)s' - - def get_topology(self, container_name): - """Get topology - - URL: - {Base URL} - """ - return self.request('', container_name) - - def get_user_links(self, container_name): - """Get user links - - URL: - {Base URL}/userLinks - """ - return self.request('/userLinks', container_name) - - -class SwitchManagerAPIClient(_Base): - """OpenDaylight Switch Manager REST API Client - - Base URL: - {endpoint}/switchmanager/{containerName} - """ - - base_url = '/switchmanager/%(container_name)s' - - def get_nodes(self, container_name): - """Get node information - - URL: - {Base URL}/nodes - """ - return self.request('/nodes', container_name) - - -class HostTrackerAPIClient(_Base): - """OpenDaylight Host Tracker REST API Client - - Base URL: - {endpoint}/hosttracker/{containerName} - """ - - base_url = '/hosttracker/%(container_name)s' - - def get_active_hosts(self, container_name): - """Get active hosts information - - URL: - {Base URL}/hosts/active - """ - return self.request('/hosts/active', container_name) - - def get_inactive_hosts(self, container_name): - """Get inactive hosts information - - URL: - {Base URL}/hosts/inactive - """ - return self.request('/hosts/inactive', container_name) - - -class Client(object): - - def __init__(self, endpoint, params): - self.statistics = StatisticsAPIClient(self) - self.topology = TopologyAPIClient(self) - self.switch_manager = 
SwitchManagerAPIClient(self) - self.host_tracker = HostTrackerAPIClient(self) - - self._endpoint = endpoint - - self._req_params = self._get_req_params(params) - - @staticmethod - def _get_req_params(params): - req_params = { - 'headers': { - 'Accept': 'application/json' - }, - 'timeout': CONF.http_timeout, - } - - auth_way = params.get('auth') - if auth_way in ['basic', 'digest']: - user = params.get('user') - password = params.get('password') - - if auth_way == 'basic': - auth_class = auth.HTTPBasicAuth - else: - auth_class = auth.HTTPDigestAuth - - req_params['auth'] = auth_class(user, password) - return req_params - - def _log_req(self, url): - - curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)] - - if 'auth' in self._req_params: - auth_class = self._req_params['auth'] - if isinstance(auth_class, auth.HTTPBasicAuth): - curl_command.append('--basic ') - else: - curl_command.append('--digest ') - - curl_command.append('--user "%s":"%s" ' % (auth_class.username, - auth_class.password)) - - for name, value in six.iteritems(self._req_params['headers']): - curl_command.append('-H "%s: %s" ' % (name, value)) - - LOG.debug(''.join(curl_command)) - - @staticmethod - def _log_res(resp): - - dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version, - resp.status_code, - resp.reason)] - dump.extend('%s: %s\n' % (k, v) - for k, v in six.iteritems(resp.headers)) - dump.append('\n') - if resp.content: - dump.extend([resp.content, '\n']) - - LOG.debug(''.join(dump)) - - def _http_request(self, url): - if CONF.debug: - self._log_req(url) - resp = requests.get(url, **self._req_params) - if CONF.debug: - self._log_res(resp) - if resp.status_code // 100 != 2: - raise OpenDaylightRESTAPIFailed( - _('OpenDaylitght API returned %(status)s %(reason)s') % - {'status': resp.status_code, 'reason': resp.reason}) - - return resp.json() - - def request(self, path, container_name): - - url = self._endpoint + path % {'container_name': container_name} - return self._http_request(url) diff --git a/ceilometer/network/statistics/opendaylight/driver.py b/ceilometer/network/statistics/opendaylight/driver.py deleted file mode 100644 index bc78dbe5..00000000 --- a/ceilometer/network/statistics/opendaylight/driver.py +++ /dev/null @@ -1,448 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
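Before this change, the OpenDaylight client above was constructed with scalar parameters (the driver unwraps the query-string lists before building it); endpoint and credentials here are illustrative:

    from ceilometer.network.statistics.opendaylight import client

    cs = client.Client('http://127.0.0.1:8080/controller/nb/v2',
                       {'auth': 'basic', 'user': 'admin', 'password': 'admin'})

    # GET {endpoint}/statistics/default/flow, decoded from JSON.
    flows = cs.statistics.get_flow_statistics('default')
    # GET {endpoint}/topology/default/userLinks
    links = cs.topology.get_user_links('default')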
- -from oslo_log import log -import six -from six import moves -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer.network.statistics import driver -from ceilometer.network.statistics.opendaylight import client -from ceilometer import utils - - -LOG = log.getLogger(__name__) - - -def _get_properties(properties, prefix='properties'): - resource_meta = {} - if properties is not None: - for k, v in six.iteritems(properties): - value = v['value'] - key = prefix + '_' + k - if 'name' in v: - key += '_' + v['name'] - resource_meta[key] = value - return resource_meta - - -def _get_int_sample(key, statistic, resource_id, resource_meta): - if key not in statistic: - return None - return int(statistic[key]), resource_id, resource_meta - - -class OpenDayLightDriver(driver.Driver): - """Driver of network info collector from OpenDaylight. - - This driver uses resources in "pipeline.yaml". - Resource requires below conditions: - - * resource is url - * scheme is "opendaylight" - - This driver can be configured via query parameters. - Supported parameters: - - * scheme: - The scheme of request url to OpenDaylight REST API endpoint. - (default http) - * auth: - Auth strategy of http. - This parameter can be set basic and digest.(default None) - * user: - This is username that is used by auth.(default None) - * password: - This is password that is used by auth.(default None) - * container_name: - Name of container of OpenDaylight.(default "default") - This parameter allows multi values. - - e.g.:: - - opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default& - container_name=egg&auth=basic&user=admin&password=admin&scheme=http - - In this case, the driver send request to below URLs: - - http://127.0.0.1:8080/controller/nb/v2/statistics/default/flow - http://127.0.0.1:8080/controller/nb/v2/statistics/egg/flow - """ - @staticmethod - def _prepare_cache(endpoint, params, cache): - - if 'network.statistics.opendaylight' in cache: - return cache['network.statistics.opendaylight'] - - data = {} - - container_names = params.get('container_name', ['default']) - - odl_params = {} - if 'auth' in params: - odl_params['auth'] = params['auth'][0] - if 'user' in params: - odl_params['user'] = params['user'][0] - if 'password' in params: - odl_params['password'] = params['password'][0] - cs = client.Client(endpoint, odl_params) - - for container_name in container_names: - try: - container_data = {} - - # get flow statistics - container_data['flow'] = cs.statistics.get_flow_statistics( - container_name) - - # get port statistics - container_data['port'] = cs.statistics.get_port_statistics( - container_name) - - # get table statistics - container_data['table'] = cs.statistics.get_table_statistics( - container_name) - - # get topology - container_data['topology'] = cs.topology.get_topology( - container_name) - - # get switch information - container_data['switch'] = cs.switch_manager.get_nodes( - container_name) - - # get and optimize user links - # e.g. 
- # before: - # "OF|2@OF|00:00:00:00:00:00:00:02" - # after: - # { - # 'port': { - # 'type': 'OF', - # 'id': '2'}, - # 'node': { - # 'type': 'OF', - # 'id': '00:00:00:00:00:00:00:02' - # } - # } - user_links_raw = cs.topology.get_user_links(container_name) - user_links = [] - container_data['user_links'] = user_links - for user_link_row in user_links_raw['userLinks']: - user_link = {} - for k, v in six.iteritems(user_link_row): - if (k == "dstNodeConnector" or - k == "srcNodeConnector"): - port_raw, node_raw = v.split('@') - port = {} - port['type'], port['id'] = port_raw.split('|') - node = {} - node['type'], node['id'] = node_raw.split('|') - v = {'port': port, 'node': node} - user_link[k] = v - user_links.append(user_link) - - # get link status to hosts - container_data['active_hosts'] = ( - cs.host_tracker.get_active_hosts(container_name)) - container_data['inactive_hosts'] = ( - cs.host_tracker.get_inactive_hosts(container_name)) - data[container_name] = container_data - except Exception: - LOG.exception(_('Request failed to connect to OpenDaylight' - ' with NorthBound REST API')) - - cache['network.statistics.opendaylight'] = data - - return data - - def get_sample_data(self, meter_name, parse_url, params, cache): - - extractor = self._get_extractor(meter_name) - if extractor is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. - return None - - iter = self._get_iter(meter_name) - if iter is None: - # The way to getting meter is not implemented in this driver or - # OpenDaylight REST API has not api to getting meter. - return None - - parts = urlparse.ParseResult(params.get('scheme', ['http'])[0], - parse_url.netloc, - parse_url.path, - None, - None, - None) - endpoint = urlparse.urlunparse(parts) - - data = self._prepare_cache(endpoint, params, cache) - - samples = [] - for name, value in six.iteritems(data): - for sample in iter(extractor, value): - if sample is not None: - # set controller name and container name - # to resource_metadata - sample[2]['controller'] = 'OpenDaylight' - sample[2]['container'] = name - - samples.append(sample) - - return samples - - def _get_iter(self, meter_name): - if meter_name == 'switch': - return self._iter_switch - elif meter_name.startswith('switch.flow'): - return self._iter_flow - elif meter_name.startswith('switch.table'): - return self._iter_table - elif meter_name.startswith('switch.port'): - return self._iter_port - - def _get_extractor(self, meter_name): - method_name = '_' + meter_name.replace('.', '_') - return getattr(self, method_name, None) - - @staticmethod - def _iter_switch(extractor, data): - for switch in data['switch']['nodeProperties']: - yield extractor(switch, switch['node']['id'], {}) - - @staticmethod - def _switch(statistic, resource_id, resource_meta): - - resource_meta.update(_get_properties(statistic.get('properties'))) - - return 1, resource_id, resource_meta - - @staticmethod - def _iter_port(extractor, data): - for port_statistic in data['port']['portStatistics']: - for statistic in port_statistic['portStatistic']: - resource_meta = {'port': statistic['nodeConnector']['id']} - yield extractor(statistic, port_statistic['node']['id'], - resource_meta, data) - - @staticmethod - def _switch_port(statistic, resource_id, resource_meta, data): - my_node_id = resource_id - my_port_id = statistic['nodeConnector']['id'] - - # link status from topology - edge_properties = data['topology']['edgeProperties'] - for edge_property in edge_properties: - 
edge = edge_property['edge'] - - if (edge['headNodeConnector']['node']['id'] == my_node_id and - edge['headNodeConnector']['id'] == my_port_id): - target_node = edge['tailNodeConnector'] - elif (edge['tailNodeConnector']['node']['id'] == my_node_id and - edge['tailNodeConnector']['id'] == my_port_id): - target_node = edge['headNodeConnector'] - else: - continue - - resource_meta['topology_node_id'] = target_node['node']['id'] - resource_meta['topology_node_port'] = target_node['id'] - - resource_meta.update(_get_properties( - edge_property.get('properties'), - prefix='topology')) - - break - - # link status from user links - for user_link in data['user_links']: - if (user_link['dstNodeConnector']['node']['id'] == my_node_id and - user_link['dstNodeConnector']['port']['id'] == my_port_id): - target_node = user_link['srcNodeConnector'] - elif (user_link['srcNodeConnector']['node']['id'] == my_node_id and - user_link['srcNodeConnector']['port']['id'] == my_port_id): - target_node = user_link['dstNodeConnector'] - else: - continue - - resource_meta['user_link_node_id'] = target_node['node']['id'] - resource_meta['user_link_node_port'] = target_node['port']['id'] - resource_meta['user_link_status'] = user_link['status'] - resource_meta['user_link_name'] = user_link['name'] - - break - - # link status to hosts - for hosts, status in moves.zip( - [data['active_hosts'], data['inactive_hosts']], - ['active', 'inactive']): - for host_config in hosts['hostConfig']: - if (host_config['nodeId'] != my_node_id or - host_config['nodeConnectorId'] != my_port_id): - continue - - resource_meta['host_status'] = status - for key in ['dataLayerAddress', 'vlan', 'staticHost', - 'networkAddress']: - if key in host_config: - resource_meta['host_' + key] = host_config[key] - - break - - return 1, resource_id, resource_meta - - @staticmethod - def _switch_port_receive_packets(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receivePackets', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_packets(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitPackets', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_bytes(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveBytes', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_bytes(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitBytes', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_drops(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveDrops', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_drops(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitDrops', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_errors(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveErrors', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_transmit_errors(statistic, resource_id, - resource_meta, data): - return _get_int_sample('transmitErrors', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_frame_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveFrameError', statistic, resource_id, - resource_meta) - - @staticmethod - def 
_switch_port_receive_overrun_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveOverRunError', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_receive_crc_error(statistic, resource_id, - resource_meta, data): - return _get_int_sample('receiveCrcError', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_port_collision_count(statistic, resource_id, - resource_meta, data): - return _get_int_sample('collisionCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _iter_table(extractor, data): - for table_statistic in data['table']['tableStatistics']: - for statistic in table_statistic['tableStatistic']: - resource_meta = {'table_id': statistic['nodeTable']['id']} - yield extractor(statistic, - table_statistic['node']['id'], - resource_meta) - - @staticmethod - def _switch_table(statistic, resource_id, resource_meta): - return 1, resource_id, resource_meta - - @staticmethod - def _switch_table_active_entries(statistic, resource_id, - resource_meta): - return _get_int_sample('activeCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_table_lookup_packets(statistic, resource_id, - resource_meta): - return _get_int_sample('lookupCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_table_matched_packets(statistic, resource_id, - resource_meta): - return _get_int_sample('matchedCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _iter_flow(extractor, data): - for flow_statistic in data['flow']['flowStatistics']: - for statistic in flow_statistic['flowStatistic']: - resource_meta = {'flow_id': statistic['flow']['id'], - 'table_id': statistic['tableId']} - for key, value in utils.dict_to_keyval(statistic['flow'], - 'flow'): - resource_meta[key.replace('.', '_')] = value - yield extractor(statistic, - flow_statistic['node']['id'], - resource_meta) - - @staticmethod - def _switch_flow(statistic, resource_id, resource_meta): - return 1, resource_id, resource_meta - - @staticmethod - def _switch_flow_duration_seconds(statistic, resource_id, - resource_meta): - return _get_int_sample('durationSeconds', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_duration_nanoseconds(statistic, resource_id, - resource_meta): - return _get_int_sample('durationNanoseconds', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_packets(statistic, resource_id, resource_meta): - return _get_int_sample('packetCount', statistic, resource_id, - resource_meta) - - @staticmethod - def _switch_flow_bytes(statistic, resource_id, resource_meta): - return _get_int_sample('byteCount', statistic, resource_id, - resource_meta) diff --git a/ceilometer/network/statistics/port.py b/ceilometer/network/statistics/port.py deleted file mode 100644 index d9039022..00000000 --- a/ceilometer/network/statistics/port.py +++ /dev/null @@ -1,109 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class PortPollster(statistics._Base): - - meter_name = 'switch.port' - meter_type = sample.TYPE_GAUGE - meter_unit = 'port' - - -class PortPollsterReceivePackets(statistics._Base): - - meter_name = 'switch.port.receive.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitPackets(statistics._Base): - - meter_name = 'switch.port.transmit.packets' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveBytes(statistics._Base): - - meter_name = 'switch.port.receive.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' - - -class PortPollsterTransmitBytes(statistics._Base): - - meter_name = 'switch.port.transmit.bytes' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'B' - - -class PortPollsterReceiveDrops(statistics._Base): - - meter_name = 'switch.port.receive.drops' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitDrops(statistics._Base): - - meter_name = 'switch.port.transmit.drops' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveErrors(statistics._Base): - - meter_name = 'switch.port.receive.errors' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterTransmitErrors(statistics._Base): - - meter_name = 'switch.port.transmit.errors' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveFrameErrors(statistics._Base): - - meter_name = 'switch.port.receive.frame_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveOverrunErrors(statistics._Base): - - meter_name = 'switch.port.receive.overrun_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterReceiveCRCErrors(statistics._Base): - - meter_name = 'switch.port.receive.crc_error' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' - - -class PortPollsterCollisionCount(statistics._Base): - - meter_name = 'switch.port.collision.count' - meter_type = sample.TYPE_CUMULATIVE - meter_unit = 'packet' diff --git a/ceilometer/network/statistics/switch.py b/ceilometer/network/statistics/switch.py deleted file mode 100644 index 268b2589..00000000 --- a/ceilometer/network/statistics/switch.py +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
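As the port meters above show, declaring a network-statistics meter took only three class attributes on a statistics._Base subclass; URL parsing, driver dispatch, and sample construction were all inherited. A hypothetical additional meter, for illustration only:

    from ceilometer.network import statistics
    from ceilometer import sample

    class PortPollsterTransmitFrames(statistics._Base):
        """Hypothetical pollster; not part of the tree being removed."""

        meter_name = 'switch.port.transmit.frames'
        meter_type = sample.TYPE_CUMULATIVE
        meter_unit = 'frame'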
- - -from ceilometer.network import statistics -from ceilometer import sample - - -class SWPollster(statistics._Base): - - meter_name = 'switch' - meter_type = sample.TYPE_GAUGE - meter_unit = 'switch' diff --git a/ceilometer/network/statistics/table.py b/ceilometer/network/statistics/table.py deleted file mode 100644 index 2571cd6a..00000000 --- a/ceilometer/network/statistics/table.py +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from ceilometer.network import statistics -from ceilometer import sample - - -class TablePollster(statistics._Base): - - meter_name = 'switch.table' - meter_type = sample.TYPE_GAUGE - meter_unit = 'table' - - -class TablePollsterActiveEntries(statistics._Base): - - meter_name = 'switch.table.active.entries' - meter_type = sample.TYPE_GAUGE - meter_unit = 'entry' - - -class TablePollsterLookupPackets(statistics._Base): - - meter_name = 'switch.table.lookup.packets' - meter_type = sample.TYPE_GAUGE - meter_unit = 'packet' - - -class TablePollsterMatchedPackets(statistics._Base): - - meter_name = 'switch.table.matched.packets' - meter_type = sample.TYPE_GAUGE - meter_unit = 'packet' diff --git a/ceilometer/neutron_client.py b/ceilometer/neutron_client.py deleted file mode 100644 index 5299ef45..00000000 --- a/ceilometer/neutron_client.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
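These switch, table, flow, and port pollsters obtained their targets from the pipeline configuration rather than from discovery (their default_discovery is None). A sketch of the kind of pipeline.yaml source that fed them, assembled from the driver docstrings above; names, interval, and the sink are illustrative:

    sources:
        - name: switch_source
          interval: 600
          meters:
              - "switch"
              - "switch.port*"
              - "switch.table*"
              - "switch.flow*"
          resources:
              - opendaylight://127.0.0.1:8080/controller/nb/v2?container_name=default&auth=basic&user=admin&password=admin
          sinks:
              - meter_sink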
- -import functools - -from neutronclient.common import exceptions -from neutronclient.v2_0 import client as clientv20 -from oslo_config import cfg -from oslo_log import log - -from ceilometer import keystone_client - -SERVICE_OPTS = [ - cfg.StrOpt('neutron', - default='network', - help='Neutron service type.'), - cfg.StrOpt('neutron_lbaas_version', - default='v2', - choices=('v1', 'v2'), - help='Neutron load balancer version.') -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - -LOG = log.getLogger(__name__) - - -def logged(func): - - @functools.wraps(func) - def with_logging(*args, **kwargs): - try: - return func(*args, **kwargs) - except exceptions.NeutronClientException as e: - if e.status_code == 404: - LOG.warning("The resource could not be found.") - else: - LOG.warning(e) - return [] - except Exception as e: - LOG.exception(e) - raise - - return with_logging - - -class Client(object): - """A client which gets information via python-neutronclient.""" - - def __init__(self): - conf = cfg.CONF.service_credentials - params = { - 'session': keystone_client.get_session(), - 'endpoint_type': conf.interface, - 'region_name': conf.region_name, - 'service_type': cfg.CONF.service_types.neutron, - } - self.client = clientv20.Client(**params) - self.lb_version = cfg.CONF.service_types.neutron_lbaas_version - - @logged - def port_get_all(self): - resp = self.client.list_ports() - return resp.get('ports') - - @logged - def vip_get_all(self): - resp = self.client.list_vips() - return resp.get('vips') - - @logged - def pool_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_pools() - resources = resp.get('pools') - elif self.lb_version == 'v2': - resources = self.list_pools_v2() - return resources - - @logged - def member_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_members() - resources = resp.get('members') - elif self.lb_version == 'v2': - resources = self.list_members_v2() - return resources - - @logged - def health_monitor_get_all(self): - resources = [] - if self.lb_version == 'v1': - resp = self.client.list_health_monitors() - resources = resp.get('health_monitors') - elif self.lb_version == 'v2': - resources = self.list_health_monitors_v2() - return resources - - @logged - def pool_stats(self, pool): - return self.client.retrieve_pool_stats(pool) - - @logged - def vpn_get_all(self): - resp = self.client.list_vpnservices() - return resp.get('vpnservices') - - @logged - def ipsec_site_connections_get_all(self): - resp = self.client.list_ipsec_site_connections() - return resp.get('ipsec_site_connections') - - @logged - def firewall_get_all(self): - resp = self.client.list_firewalls() - return resp.get('firewalls') - - @logged - def fw_policy_get_all(self): - resp = self.client.list_firewall_policies() - return resp.get('firewall_policies') - - @logged - def fip_get_all(self): - fips = self.client.list_floatingips()['floatingips'] - return fips - - @logged - def list_pools_v2(self): - """This method is used to get the pools list. - - This method uses Load Balancer v2_0 API to achieve - the detailed list of the pools. 
-
-        :returns: The list of the pool resources
-        """
-        pool_status = dict()
-        resp = self.client.list_lbaas_pools()
-        temp_pools = resp.get('pools')
-        resources = []
-        pool_listener_dict = self._get_pool_and_listener_ids(temp_pools)
-        for k, v in pool_listener_dict.items():
-            loadbalancer_id = self._get_loadbalancer_id_with_listener_id(v)
-            status = self._get_pool_status(loadbalancer_id, v)
-            for k, v in status.items():
-                pool_status[k] = v
-
-        for pool in temp_pools:
-            pool_id = pool.get('id')
-            pool['status'] = pool_status[pool_id]
-            pool['lb_method'] = pool.get('lb_algorithm')
-            pool['status_description'] = pool['status']
-            # Based on the LBaaS v2 design, the properties 'vip_id'
-            # and 'subnet_id' belong to the loadbalancer resource and
-            # not to the pool resource. However, because we don't want to
-            # change the metadata of the pool resource in this release,
-            # we set them to empty values manually.
-            pool['provider'] = ''
-            pool['vip_id'] = ''
-            pool['subnet_id'] = ''
-            resources.append(pool)
-
-        return resources
-
-    @logged
-    def list_members_v2(self):
-        """List members.
-
-        Get the detailed list of the members with the
-        Load Balancer v2.0 API.
-
-        :returns: The list of the member resources
-        """
-        resources = []
-        pools = self.client.list_lbaas_pools().get('pools')
-        for pool in pools:
-            pool_id = pool.get('id')
-            listener_id = pool.get('listeners')[0].get('id')
-            lb_id = self._get_loadbalancer_id_with_listener_id(listener_id)
-            status = self._get_member_status(lb_id, [listener_id, pool_id])
-            resp = self.client.list_lbaas_members(pool_id)
-            temp_members = resp.get('members')
-            for member in temp_members:
-                member['status'] = status[member.get('id')]
-                member['pool_id'] = pool_id
-                member['status_description'] = member['status']
-                resources.append(member)
-        return resources
-
-    @logged
-    def list_health_monitors_v2(self):
-        """List health monitors.
-
-        Get the detailed list of the health monitors with the
-        Load Balancer v2.0 API.
-
-        :returns: The list of the health monitor resources
-        """
-        resp = self.client.list_lbaas_healthmonitors()
-        resources = resp.get('healthmonitors')
-        return resources
-
-    def _get_pool_and_listener_ids(self, pools):
-        """Get the mapping between pools and listeners.
-
-        Extract the pool IDs and listener IDs from the pool list.
-
-        :param pools: The list of the pools
-        :returns: A dictionary mapping the ID of each pool to the ID
-        of the first listener the pool belongs to
-        """
-        pool_listener_dict = dict()
-        for pool in pools:
-            key = pool.get("id")
-            value = pool.get('listeners')[0].get('id')
-            pool_listener_dict[key] = value
-        return pool_listener_dict
-
-    def _retrieve_loadbalancer_status_tree(self, loadbalancer_id):
-        """Get the status tree of a load balancer.
-
-        Retrieve the status tree of a specific Load Balancer.
-
-        :param loadbalancer_id: The ID of the specific Load Balancer.
-        :returns: The status of the Load Balancer, consisting of the
-        provisioning and operating statuses of the load balancer and
-        all of its children
-        """
-        lb_status_tree = self.client.retrieve_loadbalancer_status(
-            loadbalancer_id)
-        return lb_status_tree
-
-    def _get_loadbalancer_id_with_listener_id(self, listener_id):
-        """Get the ID of the load balancer a listener belongs to.
-
-        :param listener_id: The ID of the listener
-        :returns: The ID of the load balancer
-        """
-        listener = self.client.show_listener(listener_id)
-        listener_lbs = listener.get('listener').get('loadbalancers')
-        loadbalancer_id = listener_lbs[0].get('id')
-        return loadbalancer_id
-
-    def _get_member_status(self, loadbalancer_id, parent_id):
-        """Get the status of the member resources.
-
-        Get the status of the member resources belonging to the
-        specific Load Balancer.
-
-        :param loadbalancer_id: The ID of the Load Balancer.
-        :param parent_id: The parent ID list of the member resource.
-        For the member resource, the parent_id should be [listener_id,
-        pool_id].
-        :returns: The status dictionary of the member
-        resource. The key is the ID of the member. The value is
-        the operating status of the member resource.
-        """
-        # FIXME(liamji) the following meters are experimental and
-        # may generate a large load against the neutron api. Future
-        # enhancements can be tracked against:
-        # https://review.openstack.org/#/c/218560.
-        # After it has been merged and the neutron client supports
-        # the corresponding APIs, switch to the new method to get the
-        # status of the members.
-        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
-        status_tree = resp.get('statuses').get('loadbalancer')
-        status_dict = dict()
-
-        listeners_status = status_tree.get('listeners')
-        for listener_status in listeners_status:
-            listener_id = listener_status.get('id')
-            if listener_id == parent_id[0]:
-                pools_status = listener_status.get('pools')
-                for pool_status in pools_status:
-                    if pool_status.get('id') == parent_id[1]:
-                        members_status = pool_status.get('members')
-                        for member_status in members_status:
-                            key = member_status.get('id')
-                            # Skip the item if it has no 'id'
-                            # property.
-                            if key is None:
-                                continue
-                            # A None 'operating_status' is handled by
-                            # the get_sample() method in lbaas.py.
-                            value = member_status.get('operating_status')
-                            status_dict[key] = value
-                        break
-                break
-
-        return status_dict
-
-    def _get_listener_status(self, loadbalancer_id):
-        """Get the status of the listener resources.
-
-        Get the status of the listener resources belonging to the
-        specific Load Balancer.
-
-        :param loadbalancer_id: The ID of the Load Balancer.
-        :returns: The status dictionary of the listener
-        resource. The key is the ID of the listener resource. The
-        value is the operating status of the listener resource.
-        """
-        # FIXME(liamji) the following meters are experimental and
-        # may generate a large load against the neutron api. Future
-        # enhancements can be tracked against:
-        # https://review.openstack.org/#/c/218560.
-        # After it has been merged and the neutron client supports
-        # the corresponding APIs, switch to the new method to get the
-        # status of the listeners.
-        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
-        status_tree = resp.get('statuses').get('loadbalancer')
-        status_dict = dict()
-
-        listeners_status = status_tree.get('listeners')
-        for listener_status in listeners_status:
-            key = listener_status.get('id')
-            # Skip the item if it has no 'id' property.
-            if key is None:
-                continue
-            # A None 'operating_status' is handled by
-            # the get_sample() method in lbaas.py.
-            value = listener_status.get('operating_status')
-            status_dict[key] = value
-
-        return status_dict
-
-    def _get_pool_status(self, loadbalancer_id, parent_id):
-        """Get the status of the pool resources.
-
-        Get the status of the pool resources belonging to the
-        specific Load Balancer.
-
-        :param loadbalancer_id: The ID of the Load Balancer.
-        :param parent_id: The parent ID of the pool resource.
-        :returns: The status dictionary of the pool resource.
-        The key is the ID of the pool resource. The value is
-        the operating status of the pool resource.
-        """
-        # FIXME(liamji) the following meters are experimental and
-        # may generate a large load against the neutron api. Future
-        # enhancements can be tracked against:
-        # https://review.openstack.org/#/c/218560.
-        # After it has been merged and the neutron client supports
-        # the corresponding APIs, switch to the new method to get the
-        # status of the pools.
-        resp = self._retrieve_loadbalancer_status_tree(loadbalancer_id)
-        status_tree = resp.get('statuses').get('loadbalancer')
-        status_dict = dict()
-
-        listeners_status = status_tree.get('listeners')
-        for listener_status in listeners_status:
-            listener_id = listener_status.get('id')
-            if listener_id == parent_id:
-                pools_status = listener_status.get('pools')
-                for pool_status in pools_status:
-                    key = pool_status.get('id')
-                    # Skip the item if it has no 'id' property.
-                    if key is None:
-                        continue
-                    # A None 'operating_status' is handled by
-                    # the get_sample() method in lbaas.py.
-                    value = pool_status.get('operating_status')
-                    status_dict[key] = value
-                break
-
-        return status_dict
-
-    @logged
-    def list_listener(self):
-        """Get the list of listeners."""
-        resp = self.client.list_listeners()
-        resources = resp.get('listeners')
-        for listener in resources:
-            loadbalancer_id = listener.get('loadbalancers')[0].get('id')
-            status = self._get_listener_status(loadbalancer_id)
-            listener['operating_status'] = status[listener.get('id')]
-        return resources
-
-    @logged
-    def list_loadbalancer(self):
-        """Get the list of load balancers."""
-        resp = self.client.list_loadbalancers()
-        resources = resp.get('loadbalancers')
-        return resources
-
-    @logged
-    def get_loadbalancer_stats(self, loadbalancer_id):
-        """Get the statistics of a load balancer.
-
-        :param loadbalancer_id: the ID of the specified load balancer
-        """
-        resp = self.client.retrieve_loadbalancer_stats(loadbalancer_id)
-        resource = resp.get('stats')
-        return resource
diff --git a/ceilometer/notification.py b/ceilometer/notification.py
deleted file mode 100644
index c33e536b..00000000
--- a/ceilometer/notification.py
+++ /dev/null
@@ -1,340 +0,0 @@
-#
-# Copyright 2012-2013 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
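For reference, the status-tree walk that _get_member_status() performs in the neutron client code removed above can be condensed into a self-contained sketch. The tree shape mirrors what the LBaaS v2 statuses call returns; the function and the literal IDs below are invented for the example.

def member_statuses(status_tree, listener_id, pool_id):
    """Map member ID -> operating status for one listener/pool pair."""
    result = {}
    for listener in status_tree.get('listeners', []):
        if listener.get('id') != listener_id:
            continue
        for pool in listener.get('pools', []):
            if pool.get('id') != pool_id:
                continue
            for member in pool.get('members', []):
                member_id = member.get('id')
                if member_id is None:
                    continue  # skip entries without an 'id', as the client does
                result[member_id] = member.get('operating_status')
        break
    return result

tree = {'listeners': [
    {'id': 'l1', 'pools': [
        {'id': 'p1', 'members': [
            {'id': 'm1', 'operating_status': 'ONLINE'},
            {'id': 'm2', 'operating_status': 'ERROR'}]}]}]}

assert member_statuses(tree, 'l1', 'p1') == {'m1': 'ONLINE', 'm2': 'ERROR'}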
-import itertools
-import threading
-
-from concurrent import futures
-from futurist import periodics
-from oslo_config import cfg
-from oslo_log import log
-import oslo_messaging
-from stevedore import extension
-
-from ceilometer.agent import plugin_base as base
-from ceilometer import coordination
-from ceilometer.event import endpoint as event_endpoint
-from ceilometer import exchange_control
-from ceilometer.i18n import _, _LI, _LW
-from ceilometer import messaging
-from ceilometer import pipeline
-from ceilometer import service_base
-from ceilometer import utils
-
-
-LOG = log.getLogger(__name__)
-
-
-OPTS = [
-    cfg.IntOpt('pipeline_processing_queues',
-               default=10,
-               min=1,
-               help='Number of queues to parallelize workload across. This '
-                    'value should be larger than the number of active '
-                    'notification agents for optimal results.'),
-    cfg.BoolOpt('ack_on_event_error',
-                default=True,
-                deprecated_group='collector',
-                help='Acknowledge message when event persistence fails.'),
-    cfg.BoolOpt('store_events',
-                deprecated_group='collector',
-                default=False,
-                help='Save event details.'),
-    cfg.BoolOpt('disable_non_metric_meters',
-                default=True,
-                help='WARNING: Ceilometer historically offered the ability to '
-                     'store events as meters. This usage is NOT advised as it '
-                     'can flood the metering database and cause performance '
-                     'degradation.'),
-    cfg.BoolOpt('workload_partitioning',
-                default=False,
-                help='Enable workload partitioning, allowing multiple '
-                     'notification agents to be run simultaneously.'),
-    cfg.MultiStrOpt('messaging_urls',
-                    default=[],
-                    secret=True,
-                    help="Messaging URLs to listen for notifications. "
-                         "Example: rabbit://user:pass@host1:port1"
-                         "[,user:pass@hostN:portN]/virtual_host "
-                         "(DEFAULT/transport_url is used if empty). This "
-                         "is useful when you have dedicated messaging nodes "
-                         "for each service, for example, all nova "
-                         "notifications go to rabbit-nova:5672, while all "
-                         "cinder notifications go to rabbit-cinder:5672."),
-    cfg.IntOpt('batch_size',
-               default=1,
-               help='Number of notification messages to wait before '
                    'publishing them'),
-    cfg.IntOpt('batch_timeout',
-               default=None,
-               help='Number of seconds to wait before publishing samples '
-                    'when batch_size is not reached (None means indefinitely)'),
-]
-
-cfg.CONF.register_opts(exchange_control.EXCHANGE_OPTS)
-cfg.CONF.register_opts(OPTS, group="notification")
-cfg.CONF.import_opt('telemetry_driver', 'ceilometer.publisher.messaging',
-                    group='publisher_notifier')
-
-
-class NotificationService(service_base.PipelineBasedService):
-    """Notification service.
-
-    When running multiple agents, an additional queuing sequence is
-    required for inter-process communication. Each agent has two
-    listeners: one listening to the main OpenStack queue and another
-    listener (and notifier) for IPC, to divide the pipeline sink
-    endpoints. Coordination should be enabled for proper
-    active/active HA.
- """ - - NOTIFICATION_NAMESPACE = 'ceilometer.notification' - NOTIFICATION_IPC = 'ceilometer-pipe' - - @classmethod - def _get_notifications_manager(cls, pm): - return extension.ExtensionManager( - namespace=cls.NOTIFICATION_NAMESPACE, - invoke_on_load=True, - invoke_args=(pm, ) - ) - - def _get_notifiers(self, transport, pipe): - notifiers = [] - for x in range(cfg.CONF.notification.pipeline_processing_queues): - notifiers.append(oslo_messaging.Notifier( - transport, - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id=pipe.name, - topics=['%s-%s-%s' % (self.NOTIFICATION_IPC, pipe.name, x)])) - return notifiers - - def _get_pipe_manager(self, transport, pipeline_manager): - - if cfg.CONF.notification.workload_partitioning: - pipe_manager = pipeline.SamplePipelineTransportManager() - for pipe in pipeline_manager.pipelines: - key = pipeline.get_pipeline_grouping_key(pipe) - pipe_manager.add_transporter( - (pipe.source.support_meter, key or ['resource_id'], - self._get_notifiers(transport, pipe))) - else: - pipe_manager = pipeline_manager - - return pipe_manager - - def _get_event_pipeline_manager(self, transport): - - if cfg.CONF.notification.store_events: - if cfg.CONF.notification.workload_partitioning: - event_pipe_manager = pipeline.EventPipelineTransportManager() - for pipe in self.event_pipeline_manager.pipelines: - event_pipe_manager.add_transporter( - (pipe.source.support_event, ['event_type'], - self._get_notifiers(transport, pipe))) - else: - event_pipe_manager = self.event_pipeline_manager - - return event_pipe_manager - - def start(self): - super(NotificationService, self).start() - self.periodic = None - self.partition_coordinator = None - self.coord_lock = threading.Lock() - - self.listeners = [] - - # NOTE(kbespalov): for the pipeline queues used a single amqp host - # hence only one listener is required - self.pipeline_listener = None - - self.pipeline_manager = pipeline.setup_pipeline() - - if cfg.CONF.notification.store_events: - self.event_pipeline_manager = pipeline.setup_event_pipeline() - - self.transport = messaging.get_transport() - - if cfg.CONF.notification.workload_partitioning: - self.group_id = self.NOTIFICATION_NAMESPACE - self.partition_coordinator = coordination.PartitionCoordinator() - self.partition_coordinator.start() - else: - # FIXME(sileht): endpoint uses the notification_topics option - # and it should not because this is an oslo_messaging option - # not a ceilometer. Until we have something to get the - # notification_topics in another way, we must create a transport - # to ensure the option has been registered by oslo_messaging. 
- messaging.get_notifier(self.transport, '') - self.group_id = None - - self.pipe_manager = self._get_pipe_manager(self.transport, - self.pipeline_manager) - self.event_pipe_manager = self._get_event_pipeline_manager( - self.transport) - - self._configure_main_queue_listeners(self.pipe_manager, - self.event_pipe_manager) - - if cfg.CONF.notification.workload_partitioning: - # join group after all manager set up is configured - self.partition_coordinator.join_group(self.group_id) - self.partition_coordinator.watch_group(self.group_id, - self._refresh_agent) - - @periodics.periodic(spacing=cfg.CONF.coordination.heartbeat, - run_immediately=True) - def heartbeat(): - self.partition_coordinator.heartbeat() - - @periodics.periodic(spacing=cfg.CONF.coordination.check_watchers, - run_immediately=True) - def run_watchers(): - self.partition_coordinator.run_watchers() - - self.periodic = periodics.PeriodicWorker.create( - [], executor_factory=lambda: - futures.ThreadPoolExecutor(max_workers=10)) - self.periodic.add(heartbeat) - self.periodic.add(run_watchers) - - utils.spawn_thread(self.periodic.start) - - # configure pipelines after all coordination is configured. - self._configure_pipeline_listener() - - if not cfg.CONF.notification.disable_non_metric_meters: - LOG.warning(_LW('Non-metric meters may be collected. It is highly ' - 'advisable to disable these meters using ' - 'ceilometer.conf or the pipeline.yaml')) - - self.init_pipeline_refresh() - - def _configure_main_queue_listeners(self, pipe_manager, - event_pipe_manager): - notification_manager = self._get_notifications_manager(pipe_manager) - if not list(notification_manager): - LOG.warning(_('Failed to load any notification handlers for %s'), - self.NOTIFICATION_NAMESPACE) - - ack_on_error = cfg.CONF.notification.ack_on_event_error - - endpoints = [] - if cfg.CONF.notification.store_events: - endpoints.append( - event_endpoint.EventsNotificationEndpoint(event_pipe_manager)) - - targets = [] - for ext in notification_manager: - handler = ext.obj - if (cfg.CONF.notification.disable_non_metric_meters and - isinstance(handler, base.NonMetricNotificationBase)): - continue - LOG.debug('Event types from %(name)s: %(type)s' - ' (ack_on_error=%(error)s)', - {'name': ext.name, - 'type': ', '.join(handler.event_types), - 'error': ack_on_error}) - # NOTE(gordc): this could be a set check but oslo_messaging issue - # https://bugs.launchpad.net/oslo.messaging/+bug/1398511 - # This ensures we don't create multiple duplicate consumers. 
- for new_tar in handler.get_targets(cfg.CONF): - if new_tar not in targets: - targets.append(new_tar) - endpoints.append(handler) - - urls = cfg.CONF.notification.messaging_urls or [None] - for url in urls: - transport = messaging.get_transport(url) - listener = messaging.get_batch_notification_listener( - transport, targets, endpoints, - batch_size=cfg.CONF.notification.batch_size, - batch_timeout=cfg.CONF.notification.batch_timeout) - listener.start() - self.listeners.append(listener) - - def _refresh_agent(self, event): - self._configure_pipeline_listener() - - def _configure_pipeline_listener(self): - with self.coord_lock: - ev_pipes = [] - if cfg.CONF.notification.store_events: - ev_pipes = self.event_pipeline_manager.pipelines - pipelines = self.pipeline_manager.pipelines + ev_pipes - transport = messaging.get_transport() - partitioned = self.partition_coordinator.extract_my_subset( - self.group_id, - range(cfg.CONF.notification.pipeline_processing_queues)) - - endpoints = [] - targets = [] - - for pipe in pipelines: - if isinstance(pipe, pipeline.EventPipeline): - endpoints.append(pipeline.EventPipelineEndpoint(pipe)) - else: - endpoints.append(pipeline.SamplePipelineEndpoint(pipe)) - - for pipe_set, pipe in itertools.product(partitioned, pipelines): - LOG.debug('Pipeline endpoint: %s from set: %s', - pipe.name, pipe_set) - topic = '%s-%s-%s' % (self.NOTIFICATION_IPC, - pipe.name, pipe_set) - targets.append(oslo_messaging.Target(topic=topic)) - - if self.pipeline_listener: - self.pipeline_listener.stop() - self.pipeline_listener.wait() - - self.pipeline_listener = messaging.get_batch_notification_listener( - transport, - targets, - endpoints, - batch_size=cfg.CONF.notification.batch_size, - batch_timeout=cfg.CONF.notification.batch_timeout) - self.pipeline_listener.start() - - def stop(self): - if self.started: - if self.periodic: - self.periodic.stop() - self.periodic.wait() - if self.partition_coordinator: - self.partition_coordinator.stop() - if self.pipeline_listener: - utils.kill_listeners([self.pipeline_listener]) - utils.kill_listeners(self.listeners) - super(NotificationService, self).stop() - - def reload_pipeline(self): - LOG.info(_LI("Reloading notification agent and listeners.")) - - if self.pipeline_validated: - self.pipe_manager = self._get_pipe_manager( - self.transport, self.pipeline_manager) - - if self.event_pipeline_validated: - self.event_pipe_manager = self._get_event_pipeline_manager( - self.transport) - - # restart the main queue listeners. - utils.kill_listeners(self.listeners) - self._configure_main_queue_listeners( - self.pipe_manager, self.event_pipe_manager) - - # restart the pipeline listeners if workload partitioning - # is enabled. - if cfg.CONF.notification.workload_partitioning: - self._configure_pipeline_listener() diff --git a/ceilometer/nova_client.py b/ceilometer/nova_client.py deleted file mode 100644 index b5578e63..00000000 --- a/ceilometer/nova_client.py +++ /dev/null @@ -1,171 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
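The IPC fan-out that the removed notification agent used when workload_partitioning is enabled can be sketched in a few lines. The topic names follow the 'ceilometer-pipe-<pipeline>-<N>' pattern from _get_notifiers(), and the bucketing mirrors _PipelineTransportManager.hash_grouping(); the function name and sample values below are invented for illustration.

PIPELINE_PROCESSING_QUEUES = 10  # default of notification.pipeline_processing_queues

def ipc_topic(pipeline_name, sample, grouping_keys=('resource_id',)):
    """Pick the IPC topic a serialized sample is routed to."""
    # Concatenate the grouping-key values, hash, then bucket across the
    # notifier pool, as hash_grouping() does.
    value = ''.join(str(sample.get(key) or '') for key in grouping_keys)
    bucket = hash(value) % PIPELINE_PROCESSING_QUEUES
    return 'ceilometer-pipe-%s-%s' % (pipeline_name, bucket)

sample = {'counter_name': 'cpu', 'resource_id': 'instance-0001'}
print(ipc_topic('meter_source:meter_sink', sample))
# e.g. ceilometer-pipe-meter_source:meter_sink-3. Note that str hashing is
# per-process salted under Python 3, so buckets are only stable within a
# single agent run.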
- -import functools - -import novaclient -from novaclient import api_versions -from novaclient import client as nova_client -from oslo_config import cfg -from oslo_log import log - -from ceilometer import keystone_client - -OPTS = [ - cfg.BoolOpt('nova_http_log_debug', - default=False, - # Added in Mitaka - deprecated_for_removal=True, - help=('Allow novaclient\'s debug log output. ' - '(Use default_log_levels instead)')), -] - -SERVICE_OPTS = [ - cfg.StrOpt('nova', - default='compute', - help='Nova service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_opt('http_timeout', 'ceilometer.service') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - -LOG = log.getLogger(__name__) - - -def logged(func): - - @functools.wraps(func) - def with_logging(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - LOG.exception(e) - raise - - return with_logging - - -class Client(object): - """A client which gets information via python-novaclient.""" - - def __init__(self, endpoint_override=None, auth=None): - """Initialize a nova client object.""" - conf = cfg.CONF.service_credentials - - logger = None - if cfg.CONF.nova_http_log_debug: - logger = log.getLogger("novaclient-debug") - logger.logger.setLevel(log.DEBUG) - - self.nova_client = nova_client.Client( - version=api_versions.APIVersion('2.1'), - session=keystone_client.get_session(), - - # nova adapter options - region_name=conf.region_name, - interface=conf.interface, - service_type=cfg.CONF.service_types.nova, - - # keystone adapter options - endpoint_override=endpoint_override, - auth=auth, - logger=logger) - - def _with_flavor_and_image(self, instances): - flavor_cache = {} - image_cache = {} - for instance in instances: - self._with_flavor(instance, flavor_cache) - self._with_image(instance, image_cache) - - return instances - - def _with_flavor(self, instance, cache): - fid = instance.flavor['id'] - if fid in cache: - flavor = cache.get(fid) - else: - try: - flavor = self.nova_client.flavors.get(fid) - except novaclient.exceptions.NotFound: - flavor = None - cache[fid] = flavor - - attr_defaults = [('name', 'unknown-id-%s' % fid), - ('vcpus', 0), ('ram', 0), ('disk', 0), - ('ephemeral', 0)] - - for attr, default in attr_defaults: - if not flavor: - instance.flavor[attr] = default - continue - instance.flavor[attr] = getattr(flavor, attr, default) - - def _with_image(self, instance, cache): - try: - iid = instance.image['id'] - except TypeError: - instance.image = None - instance.kernel_id = None - instance.ramdisk_id = None - return - - if iid in cache: - image = cache.get(iid) - else: - try: - image = self.nova_client.images.get(iid) - except novaclient.exceptions.NotFound: - image = None - cache[iid] = image - - attr_defaults = [('kernel_id', None), - ('ramdisk_id', None)] - - instance.image['name'] = ( - getattr(image, 'name') if image else 'unknown-id-%s' % iid) - image_metadata = getattr(image, 'metadata', None) - - for attr, default in attr_defaults: - ameta = image_metadata.get(attr) if image_metadata else default - setattr(instance, attr, ameta) - - @logged - def instance_get_all_by_host(self, hostname, since=None): - """Returns list of instances on particular host. - - If since is supplied, it will return the instances changed since that - datetime. 
since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' - """ - search_opts = {'host': hostname, 'all_tenants': True} - if since: - search_opts['changes-since'] = since - return self._with_flavor_and_image(self.nova_client.servers.list( - detailed=True, - search_opts=search_opts)) - - @logged - def instance_get_all(self, since=None): - """Returns list of all instances. - - If since is supplied, it will return the instances changes since that - datetime. since should be in ISO Format '%Y-%m-%dT%H:%M:%SZ' - """ - search_opts = {'all_tenants': True} - if since: - search_opts['changes-since'] = since - return self.nova_client.servers.list( - detailed=True, - search_opts=search_opts) diff --git a/ceilometer/objectstore/__init__.py b/ceilometer/objectstore/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/objectstore/rgw.py b/ceilometer/objectstore/rgw.py deleted file mode 100644 index 581df4c2..00000000 --- a/ceilometer/objectstore/rgw.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2015 Reliance Jio Infocomm Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common code for working with ceph object stores -""" - -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import six.moves.urllib.parse as urlparse - -from ceilometer.agent import plugin_base -from ceilometer import keystone_client -from ceilometer import sample - -LOG = log.getLogger(__name__) - -SERVICE_OPTS = [ - cfg.StrOpt('radosgw', - default='object-store', - help='Radosgw service type.'), -] - -CREDENTIAL_OPTS = [ - cfg.StrOpt('access_key', - secret=True, - help='Access key for Radosgw Admin.'), - cfg.StrOpt('secret_key', - secret=True, - help='Secret key for Radosgw Admin.') -] - -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.register_opts(CREDENTIAL_OPTS, group='rgw_admin_credentials') -cfg.CONF.import_group('rgw_admin_credentials', 'ceilometer.service') - - -class _Base(plugin_base.PollsterBase): - METHOD = 'bucket' - _ENDPOINT = None - - def __init__(self): - self.access_key = cfg.CONF.rgw_admin_credentials.access_key - self.secret = cfg.CONF.rgw_admin_credentials.secret_key - - @property - def default_discovery(self): - return 'tenant' - - @property - def CACHE_KEY_METHOD(self): - return 'rgw.get_%s' % self.METHOD - - @staticmethod - def _get_endpoint(ksclient): - # we store the endpoint as a base class attribute, so keystone is - # only ever called once, also we assume that in a single deployment - # we may be only deploying `radosgw` or `swift` as the object-store - if _Base._ENDPOINT is None: - try: - conf = cfg.CONF.service_credentials - rgw_url = keystone_client.get_service_catalog( - ksclient).url_for( - service_type=cfg.CONF.service_types.radosgw, - interface=conf.interface) - _Base._ENDPOINT = urlparse.urljoin(rgw_url, '/admin') - except exceptions.EndpointNotFound: - LOG.debug("Radosgw endpoint not found") - return _Base._ENDPOINT - - def _iter_accounts(self, ksclient, cache, tenants): - if self.CACHE_KEY_METHOD not 
in cache: - cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( - ksclient, tenants)) - return iter(cache[self.CACHE_KEY_METHOD]) - - def _get_account_info(self, ksclient, tenants): - endpoint = self._get_endpoint(ksclient) - if not endpoint: - raise StopIteration() - - try: - from ceilometer.objectstore.rgw_client import RGWAdminClient - rgw_client = RGWAdminClient(endpoint, self.access_key, self.secret) - except ImportError: - raise plugin_base.PollsterPermanentError(tenants) - - for t in tenants: - api_method = 'get_%s' % self.METHOD - yield t.id, getattr(rgw_client, api_method)(t.id) - - -class ContainersObjectsPollster(_Base): - """Get info about object counts in a container using RGW Admin APIs.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - for it in bucket_info['buckets']: - yield sample.Sample( - name='radosgw.containers.objects', - type=sample.TYPE_GAUGE, - volume=int(it.num_objects), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + it.name, - resource_metadata=None, - ) - - -class ContainersSizePollster(_Base): - """Get info about object sizes in a container using RGW Admin APIs.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - for it in bucket_info['buckets']: - yield sample.Sample( - name='radosgw.containers.objects.size', - type=sample.TYPE_GAUGE, - volume=int(it.size * 1024), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + it.name, - resource_metadata=None, - ) - - -class ObjectsSizePollster(_Base): - """Iterate over all accounts, using keystone.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects.size', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['size'] * 1024), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsPollster(_Base): - """Iterate over all accounts, using keystone.""" - - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['num_objects']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsContainersPollster(_Base): - def get_samples(self, manager, cache, resources): - for tenant, bucket_info in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.objects.containers', - type=sample.TYPE_GAUGE, - volume=int(bucket_info['num_buckets']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class UsagePollster(_Base): - - METHOD = 'usage' - - def get_samples(self, manager, cache, resources): - for tenant, usage in self._iter_accounts(manager.keystone, - cache, resources): - yield sample.Sample( - name='radosgw.api.request', - type=sample.TYPE_GAUGE, - volume=int(usage), - unit='request', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) diff --git a/ceilometer/objectstore/rgw_client.py b/ceilometer/objectstore/rgw_client.py deleted file mode 100644 index 2a3d1d7f..00000000 --- 
a/ceilometer/objectstore/rgw_client.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from collections import namedtuple - -from awsauth import S3Auth -import requests -import six.moves.urllib.parse as urlparse - -from ceilometer.i18n import _ - - -class RGWAdminAPIFailed(Exception): - pass - - -class RGWAdminClient(object): - Bucket = namedtuple('Bucket', 'name, num_objects, size') - - def __init__(self, endpoint, access_key, secret_key): - self.access_key = access_key - self.secret = secret_key - self.endpoint = endpoint - self.hostname = urlparse.urlparse(endpoint).netloc - - def _make_request(self, path, req_params): - uri = "{0}/{1}".format(self.endpoint, path) - r = requests.get(uri, params=req_params, - auth=S3Auth(self.access_key, self.secret, - self.hostname) - ) - - if r.status_code != 200: - raise RGWAdminAPIFailed( - _('RGW AdminOps API returned %(status)s %(reason)s') % - {'status': r.status_code, 'reason': r.reason}) - - return r.json() - - def get_bucket(self, tenant_id): - path = "bucket" - req_params = {"uid": tenant_id, "stats": "true"} - json_data = self._make_request(path, req_params) - stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} - stats['num_buckets'] = len(json_data) - for it in json_data: - for k, v in it["usage"].items(): - stats['num_objects'] += v["num_objects"] - stats['size'] += v["size_kb"] - stats['buckets'].append(self.Bucket(it["bucket"], - v["num_objects"], v["size_kb"])) - return stats - - def get_usage(self, tenant_id): - path = "usage" - req_params = {"uid": tenant_id} - json_data = self._make_request(path, req_params) - usage_data = json_data["summary"] - return sum((it["total"]["ops"] for it in usage_data)) diff --git a/ceilometer/objectstore/swift.py b/ceilometer/objectstore/swift.py deleted file mode 100644 index d57ff68b..00000000 --- a/ceilometer/objectstore/swift.py +++ /dev/null @@ -1,202 +0,0 @@ -# -# Copyright 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
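The bucket-stats aggregation in the removed RGWAdminClient.get_bucket() boils down to summing the per-storage-class usage entries of each bucket. A condensed sketch against a canned AdminOps-style response (the payload below is made up for the example):

from collections import namedtuple

Bucket = namedtuple('Bucket', 'name num_objects size')

def summarize_buckets(json_data):
    """Aggregate per-bucket usage the way get_bucket() does."""
    stats = {'num_buckets': len(json_data), 'num_objects': 0,
             'size': 0, 'buckets': []}
    for item in json_data:
        # Each bucket reports usage per storage class; sum them all.
        for usage in item['usage'].values():
            stats['num_objects'] += usage['num_objects']
            stats['size'] += usage['size_kb']
            stats['buckets'].append(Bucket(item['bucket'],
                                           usage['num_objects'],
                                           usage['size_kb']))
    return stats

canned = [{'bucket': 'images',
           'usage': {'rgw.main': {'num_objects': 42, 'size_kb': 2048}}}]
print(summarize_buckets(canned)['num_objects'])  # 42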
-"""Common code for working with object stores -""" - -from __future__ import absolute_import - -from keystoneauth1 import exceptions -from oslo_config import cfg -from oslo_log import log -import six.moves.urllib.parse as urlparse -from swiftclient import client as swift - -from ceilometer.agent import plugin_base -from ceilometer.i18n import _LI -from ceilometer import keystone_client -from ceilometer import sample - - -LOG = log.getLogger(__name__) - -OPTS = [ - cfg.StrOpt('reseller_prefix', - default='AUTH_', - help="Swift reseller prefix. Must be on par with " - "reseller_prefix in proxy-server.conf."), -] - -SERVICE_OPTS = [ - cfg.StrOpt('swift', - default='object-store', - help='Swift service type.'), -] - -cfg.CONF.register_opts(OPTS) -cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') -cfg.CONF.import_group('service_credentials', 'ceilometer.keystone_client') - - -class _Base(plugin_base.PollsterBase): - - METHOD = 'head' - _ENDPOINT = None - - @property - def default_discovery(self): - return 'tenant' - - @property - def CACHE_KEY_METHOD(self): - return 'swift.%s_account' % self.METHOD - - @staticmethod - def _get_endpoint(ksclient): - # we store the endpoint as a base class attribute, so keystone is - # only ever called once - if _Base._ENDPOINT is None: - try: - conf = cfg.CONF.service_credentials - _Base._ENDPOINT = keystone_client.get_service_catalog( - ksclient).url_for( - service_type=cfg.CONF.service_types.swift, - interface=conf.interface, - region_name=conf.region_name) - except exceptions.EndpointNotFound as e: - LOG.info(_LI("Swift endpoint not found: %s"), e) - return _Base._ENDPOINT - - def _iter_accounts(self, ksclient, cache, tenants): - if self.CACHE_KEY_METHOD not in cache: - cache[self.CACHE_KEY_METHOD] = list(self._get_account_info( - ksclient, tenants)) - return iter(cache[self.CACHE_KEY_METHOD]) - - def _get_account_info(self, ksclient, tenants): - endpoint = self._get_endpoint(ksclient) - if not endpoint: - raise StopIteration() - - for t in tenants: - api_method = '%s_account' % self.METHOD - yield (t.id, getattr(swift, api_method) - (self._neaten_url(endpoint, t.id), - keystone_client.get_auth_token(ksclient))) - - @staticmethod - def _neaten_url(endpoint, tenant_id): - """Transform the registered url to standard and valid format.""" - return urlparse.urljoin(endpoint.split('/v1')[0].rstrip('/') + '/', - 'v1/' + cfg.CONF.reseller_prefix + tenant_id) - - -class ObjectsPollster(_Base): - """Collect the total objects count for each project.""" - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-object-count']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsSizePollster(_Base): - """Collect the total objects size of each project.""" - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects.size', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-bytes-used']), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ObjectsContainersPollster(_Base): - """Collect the container count for each project.""" - def get_samples(self, manager, cache, resources): - tenants 
= resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - yield sample.Sample( - name='storage.objects.containers', - type=sample.TYPE_GAUGE, - volume=int(account['x-account-container-count']), - unit='container', - user_id=None, - project_id=tenant, - resource_id=tenant, - resource_metadata=None, - ) - - -class ContainersObjectsPollster(_Base): - """Collect the objects count per container for each project.""" - - METHOD = 'get' - - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - containers_info = account[1] - for container in containers_info: - yield sample.Sample( - name='storage.containers.objects', - type=sample.TYPE_GAUGE, - volume=int(container['count']), - unit='object', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + container['name'], - resource_metadata=None, - ) - - -class ContainersSizePollster(_Base): - """Collect the total objects size per container for each project.""" - - METHOD = 'get' - - def get_samples(self, manager, cache, resources): - tenants = resources - for tenant, account in self._iter_accounts(manager.keystone, - cache, tenants): - containers_info = account[1] - for container in containers_info: - yield sample.Sample( - name='storage.containers.objects.size', - type=sample.TYPE_GAUGE, - volume=int(container['bytes']), - unit='B', - user_id=None, - project_id=tenant, - resource_id=tenant + '/' + container['name'], - resource_metadata=None, - ) diff --git a/ceilometer/opts.py b/ceilometer/opts.py index 8f9f93a1..46fbb5db 100644 --- a/ceilometer/opts.py +++ b/ceilometer/opts.py @@ -13,112 +13,19 @@ # under the License. import itertools -from keystoneauth1 import loading - -import ceilometer.agent.manager import ceilometer.api import ceilometer.api.app -import ceilometer.cmd.polling -import ceilometer.collector -import ceilometer.compute.discovery -import ceilometer.compute.notifications -import ceilometer.compute.util -import ceilometer.compute.virt.inspector -import ceilometer.compute.virt.libvirt.inspector -import ceilometer.compute.virt.vmware.inspector -import ceilometer.compute.virt.xenapi.inspector -import ceilometer.coordination import ceilometer.dispatcher -import ceilometer.dispatcher.file -import ceilometer.dispatcher.gnocchi -import ceilometer.energy.kwapi -import ceilometer.event.converter -import ceilometer.hardware.discovery -import ceilometer.image.glance -import ceilometer.ipmi.notifications.ironic -import ceilometer.ipmi.platform.intel_node_manager -import ceilometer.ipmi.pollsters -import ceilometer.keystone_client -import ceilometer.meter.notifications -import ceilometer.middleware -import ceilometer.network.notifications -import ceilometer.neutron_client -import ceilometer.notification -import ceilometer.nova_client -import ceilometer.objectstore.rgw -import ceilometer.objectstore.swift -import ceilometer.pipeline -import ceilometer.publisher.messaging -import ceilometer.publisher.utils -import ceilometer.sample -import ceilometer.service import ceilometer.storage import ceilometer.utils def list_opts(): return [ - ('DEFAULT', - itertools.chain(ceilometer.agent.manager.OPTS, - ceilometer.api.app.OPTS, - ceilometer.cmd.polling.CLI_OPTS, - ceilometer.compute.notifications.OPTS, - ceilometer.compute.util.OPTS, - ceilometer.compute.virt.inspector.OPTS, - ceilometer.compute.virt.libvirt.inspector.OPTS, - ceilometer.dispatcher.OPTS, - ceilometer.image.glance.OPTS, - 
ceilometer.ipmi.notifications.ironic.OPTS, - ceilometer.middleware.OPTS, - ceilometer.network.notifications.OPTS, - ceilometer.nova_client.OPTS, - ceilometer.objectstore.swift.OPTS, - ceilometer.pipeline.OPTS, - ceilometer.sample.OPTS, - ceilometer.service.OPTS, - ceilometer.storage.CLI_OPTS, - ceilometer.utils.OPTS,)), + ('DEFAULT', ceilometer.api.app.OPTS), ('api', itertools.chain(ceilometer.api.OPTS, - ceilometer.api.app.API_OPTS, - [ceilometer.service.API_OPT])), - ('collector', - itertools.chain(ceilometer.collector.OPTS, - [ceilometer.service.COLL_OPT])), - ('compute', ceilometer.compute.discovery.OPTS), - ('coordination', ceilometer.coordination.OPTS), + ceilometer.api.app.API_OPTS)), ('database', ceilometer.storage.OPTS), - ('dispatcher_file', ceilometer.dispatcher.file.OPTS), - ('dispatcher_gnocchi', ceilometer.dispatcher.gnocchi.dispatcher_opts), - ('event', ceilometer.event.converter.OPTS), - ('exchange_control', ceilometer.exchange_control.EXCHANGE_OPTS), - ('hardware', ceilometer.hardware.discovery.OPTS), - ('ipmi', - itertools.chain(ceilometer.ipmi.platform.intel_node_manager.OPTS, - ceilometer.ipmi.pollsters.OPTS)), - ('meter', ceilometer.meter.notifications.OPTS), - ('notification', - itertools.chain(ceilometer.notification.OPTS, - [ceilometer.service.NOTI_OPT])), - ('polling', ceilometer.agent.manager.POLLING_OPTS), - ('publisher', ceilometer.publisher.utils.OPTS), - ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), - ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), - # NOTE(sileht): the configuration file contains only the options - # for the password plugin that handles keystone v2 and v3 API - # with discovery. But other options are possible. - ('service_credentials', ( - ceilometer.keystone_client.CLI_OPTS + - loading.get_auth_common_conf_options() + - loading.get_auth_plugin_conf_options('password'))), - ('service_types', - itertools.chain(ceilometer.energy.kwapi.SERVICE_OPTS, - ceilometer.image.glance.SERVICE_OPTS, - ceilometer.neutron_client.SERVICE_OPTS, - ceilometer.nova_client.SERVICE_OPTS, - ceilometer.objectstore.rgw.SERVICE_OPTS, - ceilometer.objectstore.swift.SERVICE_OPTS,)), ('storage', ceilometer.dispatcher.STORAGE_OPTS), - ('vmware', ceilometer.compute.virt.vmware.inspector.OPTS), - ('xenapi', ceilometer.compute.virt.xenapi.inspector.OPTS), ] diff --git a/ceilometer/pipeline.py b/ceilometer/pipeline.py deleted file mode 100644 index 13bc6c5a..00000000 --- a/ceilometer/pipeline.py +++ /dev/null @@ -1,866 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# Copyright 2014 Red Hat, Inc -# -# Authors: Yunhong Jiang -# Eoghan Glynn -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
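The trimmed list_opts() above is what oslo-config-generator consumes through the 'oslo.config.opts' entry point; each (group, opts) pair becomes one section of the generated sample configuration. A quick way to inspect what remains exposed after this change, assuming the stripped-down tree is importable:

# Enumerate the option groups ceilometer still registers after this patch.
import ceilometer.opts

for group, opts in ceilometer.opts.list_opts():
    for opt in opts:
        print('[%s] %s (default: %s)' % (group, opt.name, opt.default))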
- -import abc -import hashlib -from itertools import chain -import os - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import fnmatch -from oslo_utils import timeutils -import six -from stevedore import extension -import yaml - - -from ceilometer.event.storage import models -from ceilometer.i18n import _, _LI, _LW -from ceilometer import publisher -from ceilometer.publisher import utils as publisher_utils -from ceilometer import sample as sample_util - - -OPTS = [ - cfg.StrOpt('pipeline_cfg_file', - default="pipeline.yaml", - help="Configuration file for pipeline definition." - ), - cfg.StrOpt('event_pipeline_cfg_file', - default="event_pipeline.yaml", - help="Configuration file for event pipeline definition." - ), - cfg.BoolOpt('refresh_pipeline_cfg', - default=False, - help="Refresh Pipeline configuration on-the-fly." - ), - cfg.BoolOpt('refresh_event_pipeline_cfg', - default=False, - help="Refresh Event Pipeline configuration on-the-fly." - ), - cfg.IntOpt('pipeline_polling_interval', - default=20, - help="Polling interval for pipeline file configuration" - " in seconds." - ), -] - -cfg.CONF.register_opts(OPTS) - -LOG = log.getLogger(__name__) - - -class PipelineException(Exception): - def __init__(self, message, pipeline_cfg): - self.msg = message - self.pipeline_cfg = pipeline_cfg - - def __str__(self): - return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg) - - -@six.add_metaclass(abc.ABCMeta) -class PipelineEndpoint(object): - - def __init__(self, pipeline): - self.filter_rule = oslo_messaging.NotificationFilter( - publisher_id=pipeline.name) - self.publish_context = PublishContext([pipeline]) - - @abc.abstractmethod - def sample(self, messages): - pass - - -class SamplePipelineEndpoint(PipelineEndpoint): - def sample(self, messages): - samples = chain.from_iterable(m["payload"] for m in messages) - samples = [ - sample_util.Sample(name=s['counter_name'], - type=s['counter_type'], - unit=s['counter_unit'], - volume=s['counter_volume'], - user_id=s['user_id'], - project_id=s['project_id'], - resource_id=s['resource_id'], - timestamp=s['timestamp'], - resource_metadata=s['resource_metadata'], - source=s.get('source')) - for s in samples if publisher_utils.verify_signature( - s, cfg.CONF.publisher.telemetry_secret) - ] - with self.publish_context as p: - p(samples) - - -class EventPipelineEndpoint(PipelineEndpoint): - def sample(self, messages): - events = chain.from_iterable(m["payload"] for m in messages) - events = [ - models.Event( - message_id=ev['message_id'], - event_type=ev['event_type'], - generated=timeutils.normalize_time( - timeutils.parse_isotime(ev['generated'])), - traits=[models.Trait(name, dtype, - models.Trait.convert_value(dtype, value)) - for name, dtype, value in ev['traits']], - raw=ev.get('raw', {})) - for ev in events if publisher_utils.verify_signature( - ev, cfg.CONF.publisher.telemetry_secret) - ] - try: - with self.publish_context as p: - p(events) - except Exception: - if not cfg.CONF.notification.ack_on_event_error: - return oslo_messaging.NotificationResult.REQUEUE - raise - return oslo_messaging.NotificationResult.HANDLED - - -class _PipelineTransportManager(object): - def __init__(self): - self.transporters = [] - - @staticmethod - def hash_grouping(datapoint, grouping_keys): - value = '' - for key in grouping_keys or []: - value += datapoint.get(key) if datapoint.get(key) else '' - return hash(value) - - def add_transporter(self, transporter): - self.transporters.append(transporter) - - def 
publisher(self): - serializer = self.serializer - hash_grouping = self.hash_grouping - transporters = self.transporters - filter_attr = self.filter_attr - event_type = self.event_type - - class PipelinePublishContext(object): - def __enter__(self): - def p(data): - # TODO(gordc): cleanup so payload is always single - # datapoint. we can't correctly bucketise - # datapoints if batched. - data = [data] if not isinstance(data, list) else data - for datapoint in data: - serialized_data = serializer(datapoint) - for d_filter, grouping_keys, notifiers in transporters: - if d_filter(serialized_data[filter_attr]): - key = (hash_grouping(serialized_data, - grouping_keys) - % len(notifiers)) - notifier = notifiers[key] - notifier.sample({}, - event_type=event_type, - payload=[serialized_data]) - return p - - def __exit__(self, exc_type, exc_value, traceback): - pass - - return PipelinePublishContext() - - -class SamplePipelineTransportManager(_PipelineTransportManager): - filter_attr = 'counter_name' - event_type = 'ceilometer.pipeline' - - @staticmethod - def serializer(data): - return publisher_utils.meter_message_from_counter( - data, cfg.CONF.publisher.telemetry_secret) - - -class EventPipelineTransportManager(_PipelineTransportManager): - filter_attr = 'event_type' - event_type = 'pipeline.event' - - @staticmethod - def serializer(data): - return publisher_utils.message_from_event( - data, cfg.CONF.publisher.telemetry_secret) - - -class PublishContext(object): - - def __init__(self, pipelines=None): - pipelines = pipelines or [] - self.pipelines = set(pipelines) - - def add_pipelines(self, pipelines): - self.pipelines.update(pipelines) - - def __enter__(self): - def p(data): - for p in self.pipelines: - p.publish_data(data) - return p - - def __exit__(self, exc_type, exc_value, traceback): - for p in self.pipelines: - p.flush() - - -class Source(object): - """Represents a source of samples or events.""" - - def __init__(self, cfg): - self.cfg = cfg - - try: - self.name = cfg['name'] - self.sinks = cfg.get('sinks') - except KeyError as err: - raise PipelineException( - "Required field %s not specified" % err.args[0], cfg) - - def __str__(self): - return self.name - - def check_sinks(self, sinks): - if not self.sinks: - raise PipelineException( - "No sink defined in source %s" % self, - self.cfg) - for sink in self.sinks: - if sink not in sinks: - raise PipelineException( - "Dangling sink %s from source %s" % (sink, self), - self.cfg) - - def check_source_filtering(self, data, d_type): - """Source data rules checking - - - At least one meaningful datapoint exist - - Included type and excluded type can't co-exist on the same pipeline - - Included type meter and wildcard can't co-exist at same pipeline - """ - if not data: - raise PipelineException('No %s specified' % d_type, self.cfg) - - if ([x for x in data if x[0] not in '!*'] and - [x for x in data if x[0] == '!']): - raise PipelineException( - 'Both included and excluded %s specified' % d_type, - cfg) - - if '*' in data and [x for x in data if x[0] not in '!*']: - raise PipelineException( - 'Included %s specified with wildcard' % d_type, - self.cfg) - - @staticmethod - def is_supported(dataset, data_name): - # Support wildcard like storage.* and !disk.* - # Start with negation, we consider that the order is deny, allow - if any(fnmatch.fnmatch(data_name, datapoint[1:]) - for datapoint in dataset if datapoint[0] == '!'): - return False - - if any(fnmatch.fnmatch(data_name, datapoint) - for datapoint in dataset if datapoint[0] != '!'): - return 
True - - # if we only have negation, we suppose the default is allow - return all(datapoint.startswith('!') for datapoint in dataset) - - -class EventSource(Source): - """Represents a source of events. - - In effect it is a set of notification handlers capturing events for a set - of matching notifications. - """ - - def __init__(self, cfg): - super(EventSource, self).__init__(cfg) - self.events = cfg.get('events') - self.check_source_filtering(self.events, 'events') - - def support_event(self, event_name): - return self.is_supported(self.events, event_name) - - -class SampleSource(Source): - """Represents a source of samples. - - In effect it is a set of pollsters and/or notification handlers emitting - samples for a set of matching meters. Each source encapsulates meter name - matching, polling interval determination, optional resource enumeration or - discovery, and mapping to one or more sinks for publication. - """ - - def __init__(self, cfg): - super(SampleSource, self).__init__(cfg) - # Support 'counters' for backward compatibility - self.meters = cfg.get('meters', cfg.get('counters')) - try: - self.interval = int(cfg.get('interval', 600)) - except ValueError: - raise PipelineException("Invalid interval value", cfg) - if self.interval <= 0: - raise PipelineException("Interval value should > 0", cfg) - - self.resources = cfg.get('resources') or [] - if not isinstance(self.resources, list): - raise PipelineException("Resources should be a list", cfg) - - self.discovery = cfg.get('discovery') or [] - if not isinstance(self.discovery, list): - raise PipelineException("Discovery should be a list", cfg) - self.check_source_filtering(self.meters, 'meters') - - def get_interval(self): - return self.interval - - def support_meter(self, meter_name): - return self.is_supported(self.meters, meter_name) - - -class Sink(object): - """Represents a sink for the transformation and publication of data. - - Each sink config is concerned *only* with the transformation rules - and publication conduits for data. - - In effect, a sink describes a chain of handlers. The chain starts - with zero or more transformers and ends with one or more publishers. - - The first transformer in the chain is passed data from the - corresponding source, takes some action such as deriving rate of - change, performing unit conversion, or aggregating, before passing - the modified data to next step. - - The subsequent transformers, if any, handle the data similarly. - - At the end of the chain, publishers publish the data. The exact - publishing method depends on publisher type, for example, pushing - into data storage via the message bus providing guaranteed delivery, - or for loss-tolerant data UDP may be used. - - If no transformers are included in the chain, the publishers are - passed data directly from the sink which are published unchanged. 
- """ - - def __init__(self, cfg, transformer_manager): - self.cfg = cfg - - try: - self.name = cfg['name'] - # It's legal to have no transformer specified - self.transformer_cfg = cfg.get('transformers') or [] - except KeyError as err: - raise PipelineException( - "Required field %s not specified" % err.args[0], cfg) - - if not cfg.get('publishers'): - raise PipelineException("No publisher specified", cfg) - - self.publishers = [] - for p in cfg['publishers']: - if '://' not in p: - # Support old format without URL - p = p + "://" - try: - self.publishers.append(publisher.get_publisher(p, - self.NAMESPACE)) - except Exception: - LOG.exception(_("Unable to load publisher %s"), p) - - self.multi_publish = True if len(self.publishers) > 1 else False - self.transformers = self._setup_transformers(cfg, transformer_manager) - - def __str__(self): - return self.name - - def _setup_transformers(self, cfg, transformer_manager): - transformers = [] - for transformer in self.transformer_cfg: - parameter = transformer['parameters'] or {} - try: - ext = transformer_manager[transformer['name']] - except KeyError: - raise PipelineException( - "No transformer named %s loaded" % transformer['name'], - cfg) - transformers.append(ext.plugin(**parameter)) - LOG.info(_LI( - "Pipeline %(pipeline)s: Setup transformer instance %(name)s " - "with parameter %(param)s") % ({'pipeline': self, - 'name': transformer['name'], - 'param': parameter})) - - return transformers - - -class EventSink(Sink): - - NAMESPACE = 'ceilometer.event.publisher' - - def publish_events(self, events): - if events: - for p in self.publishers: - try: - p.publish_events(events) - except Exception: - LOG.exception(_("Pipeline %(pipeline)s: %(status)s" - " after error from publisher %(pub)s") % - ({'pipeline': self, 'status': 'Continue' if - self.multi_publish else 'Exit', 'pub': p} - )) - if not self.multi_publish: - raise - - @staticmethod - def flush(): - """Flush data after all events have been injected to pipeline.""" - - -class SampleSink(Sink): - - NAMESPACE = 'ceilometer.publisher' - - def _transform_sample(self, start, sample): - try: - for transformer in self.transformers[start:]: - sample = transformer.handle_sample(sample) - if not sample: - LOG.debug( - "Pipeline %(pipeline)s: Sample dropped by " - "transformer %(trans)s", {'pipeline': self, - 'trans': transformer}) - return - return sample - except Exception as err: - # TODO(gordc): only use one log level. - LOG.warning(_("Pipeline %(pipeline)s: " - "Exit after error from transformer " - "%(trans)s for %(smp)s") % ({'pipeline': self, - 'trans': transformer, - 'smp': sample})) - LOG.exception(err) - - def _publish_samples(self, start, samples): - """Push samples into pipeline for publishing. - - :param start: The first transformer that the sample will be injected. - This is mainly for flush() invocation that transformer - may emit samples. - :param samples: Sample list. 
- - """ - - transformed_samples = [] - if not self.transformers: - transformed_samples = samples - else: - for sample in samples: - LOG.debug( - "Pipeline %(pipeline)s: Transform sample " - "%(smp)s from %(trans)s transformer", {'pipeline': self, - 'smp': sample, - 'trans': start}) - sample = self._transform_sample(start, sample) - if sample: - transformed_samples.append(sample) - - if transformed_samples: - for p in self.publishers: - try: - p.publish_samples(transformed_samples) - except Exception: - LOG.exception(_( - "Pipeline %(pipeline)s: Continue after error " - "from publisher %(pub)s") % ({'pipeline': self, - 'pub': p})) - - def publish_samples(self, samples): - self._publish_samples(0, samples) - - def flush(self): - """Flush data after all samples have been injected to pipeline.""" - - for (i, transformer) in enumerate(self.transformers): - try: - self._publish_samples(i + 1, - list(transformer.flush())) - except Exception as err: - LOG.warning(_( - "Pipeline %(pipeline)s: Error flushing " - "transformer %(trans)s") % ({'pipeline': self, - 'trans': transformer})) - LOG.exception(err) - - -@six.add_metaclass(abc.ABCMeta) -class Pipeline(object): - """Represents a coupling between a sink and a corresponding source.""" - - def __init__(self, source, sink): - self.source = source - self.sink = sink - self.name = str(self) - - def __str__(self): - return (self.source.name if self.source.name == self.sink.name - else '%s:%s' % (self.source.name, self.sink.name)) - - def flush(self): - self.sink.flush() - - @property - def publishers(self): - return self.sink.publishers - - @abc.abstractmethod - def publish_data(self, data): - """Publish data from pipeline.""" - - -class EventPipeline(Pipeline): - """Represents a pipeline for Events.""" - - def __str__(self): - # NOTE(gordc): prepend a namespace so we ensure event and sample - # pipelines do not have the same name. 
- return 'event:%s' % super(EventPipeline, self).__str__() - - def support_event(self, event_type): - return self.source.support_event(event_type) - - def publish_data(self, events): - if not isinstance(events, list): - events = [events] - supported = [e for e in events - if self.source.support_event(e.event_type)] - self.sink.publish_events(supported) - - -class SamplePipeline(Pipeline): - """Represents a pipeline for Samples.""" - - def get_interval(self): - return self.source.interval - - @property - def resources(self): - return self.source.resources - - @property - def discovery(self): - return self.source.discovery - - def support_meter(self, meter_name): - return self.source.support_meter(meter_name) - - def _validate_volume(self, s): - volume = s.volume - if volume is None: - LOG.warning(_LW( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has no volume (volume: None), the sample will' - ' be dropped') - % {'counter_name': s.name, - 'resource_id': s.resource_id, - 'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'} - ) - return False - if not isinstance(volume, (int, float)): - try: - volume = float(volume) - except ValueError: - LOG.warning(_LW( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has volume which is not a number ' - '(volume: %(counter_volume)s), the sample will be dropped') - % {'counter_name': s.name, - 'resource_id': s.resource_id, - 'timestamp': ( - s.timestamp if s.timestamp else 'NO TIMESTAMP'), - 'counter_volume': volume} - ) - return False - return True - - def publish_data(self, samples): - if not isinstance(samples, list): - samples = [samples] - supported = [s for s in samples if self.source.support_meter(s.name) - and self._validate_volume(s)] - self.sink.publish_samples(supported) - - -SAMPLE_TYPE = {'pipeline': SamplePipeline, - 'source': SampleSource, - 'sink': SampleSink} - -EVENT_TYPE = {'pipeline': EventPipeline, - 'source': EventSource, - 'sink': EventSink} - - -class PipelineManager(object): - """Pipeline Manager - - Pipeline manager sets up pipelines according to config file - - Usually only one pipeline manager exists in the system. - - """ - - def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE): - """Setup the pipelines according to config. - - The configuration is supported as follows: - - Decoupled: the source and sink configuration are separately - specified before being linked together. This allows source- - specific configuration, such as resource discovery, to be - kept focused only on the fine-grained source while avoiding - the necessity for wide duplication of sink-related config. - - The configuration is provided in the form of separate lists - of dictionaries defining sources and sinks, for example: - - {"sources": [{"name": source_1, - "interval": interval_time, - "meters" : ["meter_1", "meter_2"], - "resources": ["resource_uri1", "resource_uri2"], - "sinks" : ["sink_1", "sink_2"] - }, - {"name": source_2, - "interval": interval_time, - "meters" : ["meter_3"], - "sinks" : ["sink_2"] - }, - ], - "sinks": [{"name": sink_1, - "transformers": [ - {"name": "Transformer_1", - "parameters": {"p1": "value"}}, - - {"name": "Transformer_2", - "parameters": {"p1": "value"}}, - ], - "publishers": ["publisher_1", "publisher_2"] - }, - {"name": sink_2, - "publishers": ["publisher_3"] - }, - ] - } - - The interval determines the cadence of sample injection into - the pipeline where samples are produced under the direct control - of an agent, i.e. 
via a polling cycle as opposed to incoming - notifications. - - Valid meter format is '*', '!meter_name', or 'meter_name'. - '*' is wildcard symbol means any meters; '!meter_name' means - "meter_name" will be excluded; 'meter_name' means 'meter_name' - will be included. - - The 'meter_name" is Sample name field. - - Valid meters definition is all "included meter names", all - "excluded meter names", wildcard and "excluded meter names", or - only wildcard. - - The resources is list of URI indicating the resources from where - the meters should be polled. It's optional and it's up to the - specific pollster to decide how to use it. - - Transformer's name is plugin name in setup.cfg. - - Publisher's name is plugin name in setup.cfg - - """ - self.pipelines = [] - if not ('sources' in cfg and 'sinks' in cfg): - raise PipelineException("Both sources & sinks are required", - cfg) - LOG.info(_LI('detected decoupled pipeline config format')) - - unique_names = set() - sources = [] - for s in cfg.get('sources', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated source names: %s" % - name, self) - else: - unique_names.add(name) - sources.append(p_type['source'](s)) - unique_names.clear() - - sinks = {} - for s in cfg.get('sinks', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated sink names: %s" % - name, self) - else: - unique_names.add(name) - sinks[s['name']] = p_type['sink'](s, transformer_manager) - unique_names.clear() - - for source in sources: - source.check_sinks(sinks) - for target in source.sinks: - pipe = p_type['pipeline'](source, sinks[target]) - if pipe.name in unique_names: - raise PipelineException( - "Duplicate pipeline name: %s. Ensure pipeline" - " names are unique. (name is the source and sink" - " names combined)" % pipe.name, cfg) - else: - unique_names.add(pipe.name) - self.pipelines.append(pipe) - unique_names.clear() - - def publisher(self): - """Build a new Publisher for these manager pipelines. - - :param context: The context. - """ - return PublishContext(self.pipelines) - - -class PollingManager(object): - """Polling Manager - - Polling manager sets up polling according to config file. - """ - - def __init__(self, cfg): - """Setup the polling according to config. - - The configuration is the sources half of the Pipeline Config. 
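[Editor's note] The meter include/exclude rules described in the docstring above reduce to a few lines. A sketch of the matching semantics (the free function is mine; the deleted code implements this as a method on the source)::

    def support_meter(meters, name):
        # meters is e.g. ['*'], ['!cpu'], ['*', '!cpu'] or ['cpu', 'ram']
        if ('!' + name) in meters:
            return False                      # explicitly excluded
        if '*' in meters:
            return True                       # wildcard plus exclusions
        if all(m.startswith('!') for m in meters):
            return True                       # pure exclusion list: pass the rest
        return name in meters                 # plain inclusion list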
- """ - self.sources = [] - if not ('sources' in cfg and 'sinks' in cfg): - raise PipelineException("Both sources & sinks are required", - cfg) - LOG.info(_LI('detected decoupled pipeline config format')) - - unique_names = set() - for s in cfg.get('sources', []): - name = s.get('name') - if name in unique_names: - raise PipelineException("Duplicated source names: %s" % - name, self) - else: - unique_names.add(name) - self.sources.append(SampleSource(s)) - unique_names.clear() - - -def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE): - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - LOG.debug("Pipeline config file: %s", cfg_file) - - with open(cfg_file) as fap: - data = fap.read() - - pipeline_cfg = yaml.safe_load(data) - LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) - - return PipelineManager(pipeline_cfg, - transformer_manager or - extension.ExtensionManager( - 'ceilometer.transformer', - ), p_type) - - -def _setup_polling_manager(cfg_file): - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - LOG.debug("Polling config file: %s", cfg_file) - - with open(cfg_file) as fap: - data = fap.read() - - pipeline_cfg = yaml.safe_load(data) - LOG.info(_LI("Pipeline config: %s"), pipeline_cfg) - - return PollingManager(pipeline_cfg) - - -def setup_event_pipeline(transformer_manager=None): - """Setup event pipeline manager according to yaml config file.""" - cfg_file = cfg.CONF.event_pipeline_cfg_file - return _setup_pipeline_manager(cfg_file, transformer_manager, EVENT_TYPE) - - -def setup_pipeline(transformer_manager=None): - """Setup pipeline manager according to yaml config file.""" - cfg_file = cfg.CONF.pipeline_cfg_file - return _setup_pipeline_manager(cfg_file, transformer_manager) - - -def _get_pipeline_cfg_file(p_type=SAMPLE_TYPE): - if p_type == EVENT_TYPE: - cfg_file = cfg.CONF.event_pipeline_cfg_file - else: - cfg_file = cfg.CONF.pipeline_cfg_file - - if not os.path.exists(cfg_file): - cfg_file = cfg.CONF.find_file(cfg_file) - - return cfg_file - - -def get_pipeline_mtime(p_type=SAMPLE_TYPE): - cfg_file = _get_pipeline_cfg_file(p_type) - return os.path.getmtime(cfg_file) - - -def get_pipeline_hash(p_type=SAMPLE_TYPE): - - cfg_file = _get_pipeline_cfg_file(p_type) - with open(cfg_file) as fap: - data = fap.read() - if six.PY3: - data = data.encode('utf-8') - - file_hash = hashlib.md5(data).hexdigest() - return file_hash - - -def setup_polling(): - """Setup polling manager according to yaml config file.""" - cfg_file = cfg.CONF.pipeline_cfg_file - return _setup_polling_manager(cfg_file) - - -def get_pipeline_grouping_key(pipe): - keys = [] - for transformer in pipe.sink.transformers: - keys += transformer.grouping_keys - return list(set(keys)) diff --git a/ceilometer/publisher/__init__.py b/ceilometer/publisher/__init__.py index 2966124b..e69de29b 100644 --- a/ceilometer/publisher/__init__.py +++ b/ceilometer/publisher/__init__.py @@ -1,48 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -from oslo_utils import netutils -import six -from stevedore import driver - - -def get_publisher(url, namespace='ceilometer.publisher'): - """Get publisher driver and load it. - - :param URL: URL for the publisher - :param namespace: Namespace to use to look for drivers. - """ - parse_result = netutils.urlsplit(url) - loaded_driver = driver.DriverManager(namespace, parse_result.scheme) - return loaded_driver.driver(parse_result) - - -@six.add_metaclass(abc.ABCMeta) -class PublisherBase(object): - """Base class for plugins that publish data.""" - - def __init__(self, parsed_url): - pass - - @abc.abstractmethod - def publish_samples(self, samples): - """Publish samples into final conduit.""" - - @abc.abstractmethod - def publish_events(self, events): - """Publish events into final conduit.""" diff --git a/ceilometer/publisher/direct.py b/ceilometer/publisher/direct.py deleted file mode 100644 index 6a52350b..00000000 --- a/ceilometer/publisher/direct.py +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright 2015 Red Hat -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -from oslo_utils import timeutils - -from ceilometer.dispatcher import database -from ceilometer import publisher -from ceilometer.publisher import utils - - -class DirectPublisher(publisher.PublisherBase): - """A publisher that allows saving directly from the pipeline. - - Samples are saved to the currently configured database by hitching - a ride on the DatabaseDispatcher. This is useful where it is desirable - to limit the number of external services that are required. - """ - - def __init__(self, parsed_url): - super(DirectPublisher, self).__init__(parsed_url) - dispatcher = database.DatabaseDispatcher(cfg.CONF) - self.meter_conn = dispatcher.meter_conn - self.event_conn = dispatcher.event_conn - - def publish_samples(self, samples): - if not isinstance(samples, list): - samples = [samples] - - # Transform the Sample objects into a list of dicts - meters = [ - utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - for sample in samples - ] - - for meter in meters: - if meter.get('timestamp'): - ts = timeutils.parse_isotime(meter['timestamp']) - meter['timestamp'] = timeutils.normalize_time(ts) - self.meter_conn.record_metering_data(meter) - - def publish_events(self, events): - if not isinstance(events, list): - events = [events] - - self.event_conn.record_events(events) diff --git a/ceilometer/publisher/file.py b/ceilometer/publisher/file.py deleted file mode 100644 index fd3714f8..00000000 --- a/ceilometer/publisher/file.py +++ /dev/null @@ -1,104 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
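[Editor's note] get_publisher() above resolves the URL scheme through stevedore against the 'ceilometer.publisher' entry-point namespace and instantiates the driver with the parsed URL. A sketch of a driver satisfying the deleted PublisherBase contract (the class and its 'log://' scheme are hypothetical)::

    class LogOnlyPublisher(object):
        # Hypothetical publisher: print instead of delivering anywhere.

        def __init__(self, parsed_url):
            self.parsed_url = parsed_url

        def publish_samples(self, samples):
            for sample in samples:
                print('sample: %s' % (sample,))

        def publish_events(self, events):
            for event in events:
                print('event: %s' % (event,))

Registered as ``log = mymodule:LogOnlyPublisher`` under [ceilometer.publisher] in setup.cfg (module path hypothetical), get_publisher('log://') would return an instance of it.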
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import logging.handlers - -from oslo_log import log -from six.moves.urllib import parse as urlparse - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import publisher - -LOG = log.getLogger(__name__) - - -class FilePublisher(publisher.PublisherBase): - """Publisher metering data to file. - - The publisher which records metering data into a file. The file name and - location should be configured in ceilometer pipeline configuration file. - If a file name and location is not specified, this File Publisher will not - log any meters other than log a warning in Ceilometer log file. - - To enable this publisher, add the following section to the - /etc/ceilometer/publisher.yaml file or simply add it to an existing - pipeline:: - - - - name: meter_file - interval: 600 - counters: - - "*" - transformers: - publishers: - - file:///var/test?max_bytes=10000000&backup_count=5 - - File path is required for this publisher to work properly. If max_bytes - or backup_count is missing, FileHandler will be used to save the metering - data. If max_bytes and backup_count are present, RotatingFileHandler will - be used to save the metering data. - """ - - def __init__(self, parsed_url): - super(FilePublisher, self).__init__(parsed_url) - - self.publisher_logger = None - path = parsed_url.path - if not path or path.lower() == 'file': - LOG.error(_('The path for the file publisher is required')) - return - - rfh = None - max_bytes = 0 - backup_count = 0 - # Handling other configuration options in the query string - if parsed_url.query: - params = urlparse.parse_qs(parsed_url.query) - if params.get('max_bytes') and params.get('backup_count'): - try: - max_bytes = int(params.get('max_bytes')[0]) - backup_count = int(params.get('backup_count')[0]) - except ValueError: - LOG.error(_('max_bytes and backup_count should be ' - 'numbers.')) - return - # create rotating file handler - rfh = logging.handlers.RotatingFileHandler( - path, encoding='utf8', maxBytes=max_bytes, - backupCount=backup_count) - - self.publisher_logger = logging.Logger('publisher.file') - self.publisher_logger.propagate = False - self.publisher_logger.setLevel(logging.INFO) - rfh.setLevel(logging.INFO) - self.publisher_logger.addHandler(rfh) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - if self.publisher_logger: - for sample in samples: - self.publisher_logger.info(sample.as_dict()) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - raise ceilometer.NotImplementedError diff --git a/ceilometer/publisher/http.py b/ceilometer/publisher/http.py deleted file mode 100644 index 97e942c8..00000000 --- a/ceilometer/publisher/http.py +++ /dev/null @@ -1,137 +0,0 @@ -# -# Copyright 2016 IBM -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
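[Editor's note] The query-string handling in FilePublisher above maps straight onto the stdlib rotating handler; its effect, in isolation::

    import logging.handlers
    from six.moves.urllib import parse as urlparse

    url = 'file:///var/test?max_bytes=10000000&backup_count=5'
    parsed = urlparse.urlparse(url)
    params = urlparse.parse_qs(parsed.query)
    handler = logging.handlers.RotatingFileHandler(
        parsed.path, encoding='utf8',
        maxBytes=int(params['max_bytes'][0]),
        backupCount=int(params['backup_count'][0]))

With max_bytes or backup_count absent, the deleted code falls back to a plain FileHandler instead of rotating.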
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_log import log -from oslo_serialization import jsonutils -import requests -from requests import adapters -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _LE -from ceilometer import publisher - -LOG = log.getLogger(__name__) - - -class HttpPublisher(publisher.PublisherBase): - """Publisher metering data to a http endpoint - - The publisher which records metering data into a http endpoint. The - endpoint should be configured in ceilometer pipeline configuration file. - If the timeout and/or retry_count are not specified, the default timeout - and retry_count will be set to 1000 and 2 respectively. - - To use this publisher for samples, add the following section to the - /etc/ceilometer/publisher.yaml file or simply add it to an existing - pipeline:: - - - name: meter_file - interval: 600 - counters: - - "*" - transformers: - publishers: - - http://host:80/path?timeout=1&max_retries=2 - - To use this publisher for events, the raw message needs to be present in - the event. To enable that, ceilometer.conf file will need to have a - section like the following: - - [event] - store_raw = info - - Then in the event_pipeline.yaml file, you can use the publisher in one of - the sinks like the following: - - - name: event_sink - transformers: - publishers: - - http://host:80/path?timeout=1&max_retries=2 - - Http end point is required for this publisher to work properly. - """ - - def __init__(self, parsed_url): - super(HttpPublisher, self).__init__(parsed_url) - self.target = parsed_url.geturl() - - if not parsed_url.hostname: - raise ValueError('The hostname of an endpoint for ' - 'HttpPublisher is required') - - # non-numeric port from the url string will cause a ValueError - # exception when the port is read. Do a read to make sure the port - # is valid, if not, ValueError will be thrown. - parsed_url.port - - self.headers = {'Content-type': 'application/json'} - - # Handling other configuration options in the query string - if parsed_url.query: - params = urlparse.parse_qs(parsed_url.query) - self.timeout = self._get_param(params, 'timeout', 1) - self.max_retries = self._get_param(params, 'max_retries', 2) - else: - self.timeout = 1 - self.max_retries = 2 - - LOG.debug('HttpPublisher for endpoint %s is initialized!' 
% - self.target) - - @staticmethod - def _get_param(params, name, default_value): - try: - return int(params.get(name)[-1]) - except (ValueError, TypeError): - LOG.debug('Default value %(value)s is used for %(name)s' % - {'value': default_value, 'name': name}) - return default_value - - def _do_post(self, data): - if not data: - LOG.debug('Data set is empty!') - return - - session = requests.Session() - session.mount(self.target, - adapters.HTTPAdapter(max_retries=self.max_retries)) - - content = ','.join([jsonutils.dumps(item) for item in data]) - content = '[' + content + ']' - - LOG.debug('Data to be posted by HttpPublisher: %s' % content) - - res = session.post(self.target, data=content, headers=self.headers, - timeout=self.timeout) - if res.status_code >= 300: - LOG.error(_LE('Data post failed with status code %s') % - res.status_code) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - data = [sample.as_dict() for sample in samples] - self._do_post(data) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - data = [evt.as_dict()['raw']['payload'] for evt in events - if evt.as_dict().get('raw', {}).get('payload')] - self._do_post(data) diff --git a/ceilometer/publisher/kafka_broker.py b/ceilometer/publisher/kafka_broker.py deleted file mode 100644 index 2ecae2d6..00000000 --- a/ceilometer/publisher/kafka_broker.py +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2015 Cisco Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import kafka -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import netutils -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _LE -from ceilometer.publisher import messaging - -LOG = log.getLogger(__name__) - - -class KafkaBrokerPublisher(messaging.MessagingPublisher): - """Publish metering data to kafka broker. - - The ip address and port number of kafka broker should be configured in - ceilometer pipeline configuration file. If an ip address is not specified, - this kafka publisher will not publish any meters. - - To enable this publisher, add the following section to the - /etc/ceilometer/pipeline.yaml file or simply add it to an existing - pipeline:: - - meter: - - name: meter_kafka - interval: 600 - counters: - - "*" - transformers: - sinks: - - kafka_sink - sinks: - - name: kafka_sink - transformers: - publishers: - - kafka://[kafka_broker_ip]:[kafka_broker_port]?topic=[topic] - - Kafka topic name and broker's port are required for this publisher to work - properly. If topic parameter is missing, this kafka publisher publish - metering data under a topic name, 'ceilometer'. If the port number is not - specified, this Kafka Publisher will use 9092 as the broker's port. - This publisher has transmit options such as queue, drop, and retry. 
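[Editor's note] HttpPublisher._do_post() above is a plain requests POST of a JSON array, with retries delegated to the transport adapter. Stripped of the class, the call amounts to (target and data stand in for the configured endpoint and payload)::

    import requests
    from oslo_serialization import jsonutils
    from requests import adapters

    target = 'http://host:80/path'        # endpoint from the pipeline URL
    data = [{'counter_name': 'cpu'}]      # sample/event dicts to deliver

    session = requests.Session()
    session.mount(target, adapters.HTTPAdapter(max_retries=2))
    content = '[' + ','.join(jsonutils.dumps(d) for d in data) + ']'
    res = session.post(target, data=content,
                       headers={'Content-type': 'application/json'},
                       timeout=1)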
These - options are specified using policy field of URL parameter. When queue - option could be selected, local queue length can be determined using - max_queue_length field as well. When the transfer fails with retry - option, try to resend the data as many times as specified in max_retry - field. If max_retry is not specified, default the number of retry is 100. - """ - - def __init__(self, parsed_url): - super(KafkaBrokerPublisher, self).__init__(parsed_url) - options = urlparse.parse_qs(parsed_url.query) - - self._producer = None - self._host, self._port = netutils.parse_host_port( - parsed_url.netloc, default_port=9092) - self._topic = options.get('topic', ['ceilometer'])[-1] - self.max_retry = int(options.get('max_retry', [100])[-1]) - - def _ensure_connection(self): - if self._producer: - return - - try: - client = kafka.KafkaClient("%s:%s" % (self._host, self._port)) - self._producer = kafka.SimpleProducer(client) - except Exception as e: - LOG.exception(_LE("Failed to connect to Kafka service: %s"), e) - raise messaging.DeliveryFailure('Kafka Client is not available, ' - 'please restart Kafka client') - - def _send(self, event_type, data): - self._ensure_connection() - # TODO(sileht): don't split the payload into multiple network - # message ... but how to do that without breaking consuming - # application... - try: - for d in data: - self._producer.send_messages(self._topic, jsonutils.dumps(d)) - except Exception as e: - messaging.raise_delivery_failure(e) diff --git a/ceilometer/publisher/messaging.py b/ceilometer/publisher/messaging.py deleted file mode 100644 index 3a12690f..00000000 --- a/ceilometer/publisher/messaging.py +++ /dev/null @@ -1,221 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Publish a sample using the preferred RPC mechanism. 
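[Editor's note] KafkaBrokerPublisher._send() above boils down to one send_messages() call per datapoint on the pre-1.0 kafka-python API. In isolation (the host, topic and payload are stand-ins for the values parsed from the publisher URL)::

    import kafka
    from oslo_serialization import jsonutils

    data = [{'counter_name': 'cpu'}]              # stand-in payload
    client = kafka.KafkaClient('localhost:9092')  # host:port from the URL
    producer = kafka.SimpleProducer(client)
    for item in data:
        producer.send_messages('ceilometer', jsonutils.dumps(item))

The TODO in the deleted code names the obvious improvement, batching the payload into fewer messages, and why it was left undone: it would break consumers.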
-""" - -import abc -import itertools -import operator - -from oslo_config import cfg -from oslo_log import log -import oslo_messaging -from oslo_utils import encodeutils -from oslo_utils import excutils -import six -import six.moves.urllib.parse as urlparse - -from ceilometer.i18n import _, _LE, _LI -from ceilometer import messaging -from ceilometer import publisher -from ceilometer.publisher import utils - - -LOG = log.getLogger(__name__) - -NOTIFIER_OPTS = [ - cfg.StrOpt('metering_topic', - default='metering', - help='The topic that ceilometer uses for metering ' - 'notifications.', - ), - cfg.StrOpt('event_topic', - default='event', - help='The topic that ceilometer uses for event ' - 'notifications.', - ), - cfg.StrOpt('telemetry_driver', - default='messagingv2', - help='The driver that ceilometer uses for metering ' - 'notifications.', - deprecated_name='metering_driver', - ) -] - -cfg.CONF.register_opts(NOTIFIER_OPTS, - group="publisher_notifier") -cfg.CONF.import_opt('host', 'ceilometer.service') - - -class DeliveryFailure(Exception): - def __init__(self, message=None, cause=None): - super(DeliveryFailure, self).__init__(message) - self.cause = cause - - -def raise_delivery_failure(exc): - excutils.raise_with_cause(DeliveryFailure, - encodeutils.exception_to_unicode(exc), - cause=exc) - - -@six.add_metaclass(abc.ABCMeta) -class MessagingPublisher(publisher.PublisherBase): - - def __init__(self, parsed_url): - options = urlparse.parse_qs(parsed_url.query) - # the value of options is a list of url param values - # only take care of the latest one if the option - # is provided more than once - self.per_meter_topic = bool(int( - options.get('per_meter_topic', [0])[-1])) - - self.policy = options.get('policy', ['default'])[-1] - self.max_queue_length = int(options.get( - 'max_queue_length', [1024])[-1]) - self.max_retry = 0 - - self.local_queue = [] - - if self.policy in ['default', 'queue', 'drop']: - LOG.info(_LI('Publishing policy set to %s') % self.policy) - else: - LOG.warning(_('Publishing policy is unknown (%s) force to ' - 'default') % self.policy) - self.policy = 'default' - - self.retry = 1 if self.policy in ['queue', 'drop'] else None - - def publish_samples(self, samples): - """Publish samples on RPC. - - :param samples: Samples from pipeline after transformation. - - """ - - meters = [ - utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - for sample in samples - ] - topic = cfg.CONF.publisher_notifier.metering_topic - self.local_queue.append((topic, meters)) - - if self.per_meter_topic: - for meter_name, meter_list in itertools.groupby( - sorted(meters, key=operator.itemgetter('counter_name')), - operator.itemgetter('counter_name')): - meter_list = list(meter_list) - topic_name = topic + '.' 
+ meter_name - LOG.debug('Publishing %(m)d samples on %(n)s', - {'m': len(meter_list), 'n': topic_name}) - self.local_queue.append((topic_name, meter_list)) - - self.flush() - - def flush(self): - # NOTE(sileht): - # this is why the self.local_queue is emptied before processing the - # queue and the remaining messages in the queue are added to - # self.local_queue after in case of another call having already added - # something in the self.local_queue - queue = self.local_queue - self.local_queue = [] - self.local_queue = (self._process_queue(queue, self.policy) + - self.local_queue) - if self.policy == 'queue': - self._check_queue_length() - - def _check_queue_length(self): - queue_length = len(self.local_queue) - if queue_length > self.max_queue_length > 0: - count = queue_length - self.max_queue_length - self.local_queue = self.local_queue[count:] - LOG.warning(_("Publisher max local_queue length is exceeded, " - "dropping %d oldest samples") % count) - - def _process_queue(self, queue, policy): - current_retry = 0 - while queue: - topic, data = queue[0] - try: - self._send(topic, data) - except DeliveryFailure: - data = sum([len(m) for __, m in queue]) - if policy == 'queue': - LOG.warning(_("Failed to publish %d datapoints, queue " - "them"), data) - return queue - elif policy == 'drop': - LOG.warning(_("Failed to publish %d datapoints, " - "dropping them"), data) - return [] - current_retry += 1 - if current_retry >= self.max_retry: - LOG.exception(_LE("Failed to retry to send sample data " - "with max_retry times")) - raise - else: - queue.pop(0) - return [] - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - ev_list = [utils.message_from_event( - event, cfg.CONF.publisher.telemetry_secret) for event in events] - - topic = cfg.CONF.publisher_notifier.event_topic - self.local_queue.append((topic, ev_list)) - self.flush() - - @abc.abstractmethod - def _send(self, topic, meters): - """Send the meters to the messaging topic.""" - - -class NotifierPublisher(MessagingPublisher): - def __init__(self, parsed_url, default_topic): - super(NotifierPublisher, self).__init__(parsed_url) - options = urlparse.parse_qs(parsed_url.query) - topic = options.get('topic', [default_topic]) - self.notifier = oslo_messaging.Notifier( - messaging.get_transport(), - driver=cfg.CONF.publisher_notifier.telemetry_driver, - publisher_id='telemetry.publisher.%s' % cfg.CONF.host, - topics=topic, - retry=self.retry - ) - - def _send(self, event_type, data): - try: - self.notifier.sample({}, event_type=event_type, - payload=data) - except oslo_messaging.MessageDeliveryFailure as e: - raise_delivery_failure(e) - - -class SampleNotifierPublisher(NotifierPublisher): - def __init__(self, parsed_url): - super(SampleNotifierPublisher, self).__init__( - parsed_url, cfg.CONF.publisher_notifier.metering_topic) - - -class EventNotifierPublisher(NotifierPublisher): - def __init__(self, parsed_url): - super(EventNotifierPublisher, self).__init__( - parsed_url, cfg.CONF.publisher_notifier.event_topic) diff --git a/ceilometer/publisher/test.py b/ceilometer/publisher/test.py deleted file mode 100644 index 8ae3b1e7..00000000 --- a/ceilometer/publisher/test.py +++ /dev/null @@ -1,43 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
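[Editor's note] The policy handling in MessagingPublisher above ('default', 'queue', 'drop') decides what happens to the local queue when delivery fails. A compressed sketch of _process_queue() (retry counting omitted; 'default' here re-raises immediately rather than after max_retry attempts)::

    def process_queue(queue, policy, send):
        while queue:
            topic, data = queue[0]
            try:
                send(topic, data)
            except Exception:
                if policy == 'queue':
                    return queue   # keep messages for the next flush
                if policy == 'drop':
                    return []      # discard whatever is still queued
                raise              # 'default': propagate the failure
            queue.pop(0)
        return []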
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Publish a sample in memory, useful for testing -""" - -from ceilometer import publisher - - -class TestPublisher(publisher.PublisherBase): - """Publisher used in unit testing.""" - - def __init__(self, parsed_url): - self.samples = [] - self.events = [] - self.calls = 0 - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - self.samples.extend(samples) - self.calls += 1 - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - self.events.extend(events) - self.calls += 1 diff --git a/ceilometer/publisher/udp.py b/ceilometer/publisher/udp.py deleted file mode 100644 index dd677b4b..00000000 --- a/ceilometer/publisher/udp.py +++ /dev/null @@ -1,74 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
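[Editor's note] TestPublisher above only accumulates what it is handed, which is exactly what unit tests need. A sketch of typical use, assuming the class is in scope::

    from oslo_utils import netutils

    sample = object()                       # any payload will do here
    pub = TestPublisher(netutils.urlsplit('test://'))
    pub.publish_samples([sample])
    assert pub.samples == [sample] and pub.calls == 1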
-"""Publish a sample using an UDP mechanism -""" - -import socket - -import msgpack -from oslo_config import cfg -from oslo_log import log -from oslo_utils import netutils - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import publisher -from ceilometer.publisher import utils - -cfg.CONF.import_opt('udp_port', 'ceilometer.collector', - group='collector') - -LOG = log.getLogger(__name__) - - -class UDPPublisher(publisher.PublisherBase): - def __init__(self, parsed_url): - self.host, self.port = netutils.parse_host_port( - parsed_url.netloc, - default_port=cfg.CONF.collector.udp_port) - if netutils.is_valid_ipv6(self.host): - addr_family = socket.AF_INET6 - else: - addr_family = socket.AF_INET - self.socket = socket.socket(addr_family, - socket.SOCK_DGRAM) - - def publish_samples(self, samples): - """Send a metering message for publishing - - :param samples: Samples from pipeline after transformation - """ - - for sample in samples: - msg = utils.meter_message_from_counter( - sample, cfg.CONF.publisher.telemetry_secret) - host = self.host - port = self.port - LOG.debug("Publishing sample %(msg)s over UDP to " - "%(host)s:%(port)d", {'msg': msg, 'host': host, - 'port': port}) - try: - self.socket.sendto(msgpack.dumps(msg), - (self.host, self.port)) - except Exception as e: - LOG.warning(_("Unable to send sample over UDP")) - LOG.exception(e) - - def publish_events(self, events): - """Send an event message for publishing - - :param events: events from pipeline after transformation - """ - raise ceilometer.NotImplementedError diff --git a/ceilometer/publisher/utils.py b/ceilometer/publisher/utils.py deleted file mode 100644 index 6f377312..00000000 --- a/ceilometer/publisher/utils.py +++ /dev/null @@ -1,143 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Utils for publishers -""" - -import hashlib -import hmac - -from oslo_config import cfg -import six - -from ceilometer import utils - -OPTS = [ - cfg.StrOpt('telemetry_secret', - secret=True, - default='change this for valid signing', - help='Secret value for signing messages. Set value empty if ' - 'signing is not required to avoid computational overhead.', - deprecated_opts=[cfg.DeprecatedOpt("metering_secret", - "DEFAULT"), - cfg.DeprecatedOpt("metering_secret", - "publisher_rpc"), - cfg.DeprecatedOpt("metering_secret", - "publisher")] - ), -] -cfg.CONF.register_opts(OPTS, group="publisher") - - -def compute_signature(message, secret): - """Return the signature for a message dictionary.""" - if not secret: - return '' - - if isinstance(secret, six.text_type): - secret = secret.encode('utf-8') - digest_maker = hmac.new(secret, b'', hashlib.sha256) - for name, value in utils.recursive_keypairs(message): - if name == 'message_signature': - # Skip any existing signature value, which would not have - # been part of the original message. 
- continue - digest_maker.update(six.text_type(name).encode('utf-8')) - digest_maker.update(six.text_type(value).encode('utf-8')) - return digest_maker.hexdigest() - - -def besteffort_compare_digest(first, second): - """Returns True if both string inputs are equal, otherwise False. - - This function should take a constant amount of time regardless of - how many characters in the strings match. - - """ - # NOTE(sileht): compare_digest method protected for timing-attacks - # exists since python >= 2.7.7 and python >= 3.3 - # this a bit less-secure python fallback version - # taken from https://github.com/openstack/python-keystoneclient/blob/ - # master/keystoneclient/middleware/memcache_crypt.py#L88 - if len(first) != len(second): - return False - result = 0 - if six.PY3 and isinstance(first, bytes) and isinstance(second, bytes): - for x, y in zip(first, second): - result |= x ^ y - else: - for x, y in zip(first, second): - result |= ord(x) ^ ord(y) - return result == 0 - - -if hasattr(hmac, 'compare_digest'): - compare_digest = hmac.compare_digest -else: - compare_digest = besteffort_compare_digest - - -def verify_signature(message, secret): - """Check the signature in the message. - - Message is verified against the value computed from the rest of the - contents. - """ - if not secret: - return True - - old_sig = message.get('message_signature', '') - new_sig = compute_signature(message, secret) - - if isinstance(old_sig, six.text_type): - try: - old_sig = old_sig.encode('ascii') - except UnicodeDecodeError: - return False - if six.PY3: - new_sig = new_sig.encode('ascii') - - return compare_digest(new_sig, old_sig) - - -def meter_message_from_counter(sample, secret): - """Make a metering message ready to be published or stored. - - Returns a dictionary containing a metering message - for a notification message and a Sample instance. - """ - msg = {'source': sample.source, - 'counter_name': sample.name, - 'counter_type': sample.type, - 'counter_unit': sample.unit, - 'counter_volume': sample.volume, - 'user_id': sample.user_id, - 'project_id': sample.project_id, - 'resource_id': sample.resource_id, - 'timestamp': sample.timestamp, - 'resource_metadata': sample.resource_metadata, - 'message_id': sample.id, - } - msg['message_signature'] = compute_signature(msg, secret) - return msg - - -def message_from_event(event, secret): - """Make an event message ready to be published or stored. - - Returns a serialized model of Event containing an event message - """ - msg = event.serialize() - msg['message_signature'] = compute_signature(msg, secret) - return msg diff --git a/ceilometer/sample.py b/ceilometer/sample.py deleted file mode 100644 index 8237626c..00000000 --- a/ceilometer/sample.py +++ /dev/null @@ -1,109 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Authors: Doug Hellmann -# Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Sample class for holding data about a metering event. 
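[Editor's note] compute_signature() and verify_signature() above form the round trip used to protect metering messages; the signer skips any existing message_signature key, so verification can recompute the digest over the same fields. For instance::

    secret = 'change this for valid signing'   # the (insecure) default above
    msg = {'counter_name': 'cpu', 'counter_volume': 1.0}
    msg['message_signature'] = compute_signature(msg, secret)

    assert verify_signature(msg, secret)
    msg['counter_volume'] = 9.9                # tamper with the payload
    assert not verify_signature(msg, secret)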
- -A Sample doesn't really do anything, but we need a way to -ensure that all of the appropriate fields have been filled -in by the plugins that create them. -""" - -import copy -import uuid - -from oslo_config import cfg - - -OPTS = [ - cfg.StrOpt('sample_source', - default='openstack', - help='Source for samples emitted on this instance.'), -] - -cfg.CONF.register_opts(OPTS) - - -# Fields explanation: -# -# Source: the source of this sample -# Name: the name of the meter, must be unique -# Type: the type of the meter, must be either: -# - cumulative: the value is incremented and never reset to 0 -# - delta: the value is reset to 0 each time it is sent -# - gauge: the value is an absolute value and is not a counter -# Unit: the unit of the meter -# Volume: the sample value -# User ID: the user ID -# Project ID: the project ID -# Resource ID: the resource ID -# Timestamp: when the sample has been read -# Resource metadata: various metadata -# id: an uuid of a sample, can be taken from API when post sample via API -class Sample(object): - - def __init__(self, name, type, unit, volume, user_id, project_id, - resource_id, timestamp=None, resource_metadata=None, - source=None, id=None): - self.name = name - self.type = type - self.unit = unit - self.volume = volume - self.user_id = user_id - self.project_id = project_id - self.resource_id = resource_id - self.timestamp = timestamp - self.resource_metadata = resource_metadata or {} - self.source = source or cfg.CONF.sample_source - self.id = id or str(uuid.uuid1()) - - def as_dict(self): - return copy.copy(self.__dict__) - - def __repr__(self): - return '' % ( - self.name, self.volume, self.resource_id, self.timestamp) - - @classmethod - def from_notification(cls, name, type, volume, unit, - user_id, project_id, resource_id, - message, timestamp=None, metadata=None, source=None): - if not metadata: - metadata = (copy.copy(message['payload']) - if isinstance(message['payload'], dict) else {}) - metadata['event_type'] = message['event_type'] - metadata['host'] = message['publisher_id'] - ts = timestamp if timestamp else message['timestamp'] - return cls(name=name, - type=type, - volume=volume, - unit=unit, - user_id=user_id, - project_id=project_id, - resource_id=resource_id, - timestamp=ts, - resource_metadata=metadata, - source=source) - - def set_timestamp(self, timestamp): - self.timestamp = timestamp - -TYPE_GAUGE = 'gauge' -TYPE_DELTA = 'delta' -TYPE_CUMULATIVE = 'cumulative' - -TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE) diff --git a/ceilometer/service.py b/ceilometer/service.py index a770a8f5..8ac23e3b 100644 --- a/ceilometer/service.py +++ b/ceilometer/service.py @@ -12,69 +12,20 @@ # License for the specific language governing permissions and limitations # under the License. -import socket import sys -from keystoneauth1 import loading as ka_loading from oslo_config import cfg import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from ceilometer.conf import defaults -from ceilometer import keystone_client -from ceilometer import messaging from ceilometer import version -OPTS = [ - cfg.StrOpt('host', - default=socket.gethostname(), - help='Name of this node, which must be valid in an AMQP ' - 'key. Can be an opaque identifier. For ZeroMQ only, must ' - 'be a valid host name, FQDN, or IP address.'), - cfg.IntOpt('http_timeout', - default=600, - help='Timeout seconds for HTTP requests. 
Set it to None to ' - 'disable timeout.'), -] -cfg.CONF.register_opts(OPTS) - -API_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='api_workers', - help='Number of workers for api, default value is 1.') -cfg.CONF.register_opt(API_OPT, 'api') - -NOTI_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='notification_workers', - help='Number of workers for notification service, ' - 'default value is 1.') -cfg.CONF.register_opt(NOTI_OPT, 'notification') - -COLL_OPT = cfg.IntOpt('workers', - default=1, - min=1, - deprecated_group='DEFAULT', - deprecated_name='collector_workers', - help='Number of workers for collector service. ' - 'default value is 1.') -cfg.CONF.register_opt(COLL_OPT, 'collector') - -keystone_client.register_keystoneauth_opts(cfg.CONF) - def prepare_service(argv=None, config_files=None): oslo_i18n.enable_lazy() log.register_options(cfg.CONF) - log_levels = (cfg.CONF.default_log_levels + - ['futurist=INFO', 'neutronclient=INFO', - 'keystoneclient=INFO']) - log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() if argv is None: @@ -83,12 +34,9 @@ def prepare_service(argv=None, config_files=None): version=version.version_info.version_string(), default_config_files=config_files) - ka_loading.load_auth_from_conf_options(cfg.CONF, "service_credentials") - log.setup(cfg.CONF, 'ceilometer') # NOTE(liusheng): guru cannot run with service under apache daemon, so when # ceilometer-api running with mod_wsgi, the argv is [], we don't start # guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) - messaging.setup() diff --git a/ceilometer/service_base.py b/ceilometer/service_base.py deleted file mode 100644 index 9fcbe11b..00000000 --- a/ceilometer/service_base.py +++ /dev/null @@ -1,153 +0,0 @@ -# -# Copyright 2015 Hewlett Packard -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
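[Editor's note] For reference, the Sample class removed above is a plain value object; constructing one and serializing it is all most call sites did::

    s = Sample(name='cpu', type=TYPE_CUMULATIVE, unit='ns', volume=2.0,
               user_id=None, project_id=None, resource_id='instance-1')
    assert s.as_dict()['volume'] == 2.0
    assert s.source == 'openstack'   # the sample_source default above

Note that as_dict() is a shallow copy of __dict__, so its keys are the constructor argument names, not the counter_* names used on the wire; meter_message_from_counter() performs that translation.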
- -import abc - -from oslo_config import cfg -from oslo_log import log -from oslo_service import service as os_service -import six - -from ceilometer.i18n import _LE, _LI -from ceilometer import pipeline - -LOG = log.getLogger(__name__) - - -class ServiceBase(os_service.Service): - def __init__(self): - self.started = False - super(ServiceBase, self).__init__() - - def start(self): - self.started = True - super(ServiceBase, self).start() - - -@six.add_metaclass(abc.ABCMeta) -class PipelineBasedService(ServiceBase): - def clear_pipeline_validation_status(self): - """Clears pipeline validation status flags.""" - self.pipeline_validated = False - self.event_pipeline_validated = False - - def init_pipeline_refresh(self): - """Initializes pipeline refresh state.""" - self.clear_pipeline_validation_status() - if cfg.CONF.refresh_pipeline_cfg: - self.set_pipeline_mtime(pipeline.get_pipeline_mtime()) - self.set_pipeline_hash(pipeline.get_pipeline_hash()) - - if cfg.CONF.refresh_event_pipeline_cfg: - self.set_pipeline_mtime(pipeline.get_pipeline_mtime( - pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) - self.set_pipeline_hash(pipeline.get_pipeline_hash( - pipeline.EVENT_TYPE), pipeline.EVENT_TYPE) - - if (cfg.CONF.refresh_pipeline_cfg or - cfg.CONF.refresh_event_pipeline_cfg): - self.tg.add_timer(cfg.CONF.pipeline_polling_interval, - self.refresh_pipeline) - - def get_pipeline_mtime(self, p_type=pipeline.SAMPLE_TYPE): - return (self.event_pipeline_mtime if p_type == pipeline.EVENT_TYPE else - self.pipeline_mtime) - - def set_pipeline_mtime(self, mtime, p_type=pipeline.SAMPLE_TYPE): - if p_type == pipeline.EVENT_TYPE: - self.event_pipeline_mtime = mtime - else: - self.pipeline_mtime = mtime - - def get_pipeline_hash(self, p_type=pipeline.SAMPLE_TYPE): - return (self.event_pipeline_hash if p_type == pipeline.EVENT_TYPE else - self.pipeline_hash) - - def set_pipeline_hash(self, _hash, p_type=pipeline.SAMPLE_TYPE): - if p_type == pipeline.EVENT_TYPE: - self.event_pipeline_hash = _hash - else: - self.pipeline_hash = _hash - - @abc.abstractmethod - def reload_pipeline(self): - """Reload pipeline in the agents.""" - - def pipeline_changed(self, p_type=pipeline.SAMPLE_TYPE): - """Returns hash of changed pipeline else False.""" - - pipeline_mtime = self.get_pipeline_mtime(p_type) - mtime = pipeline.get_pipeline_mtime(p_type) - if mtime > pipeline_mtime: - LOG.info(_LI('Pipeline configuration file has been updated.')) - - self.set_pipeline_mtime(mtime, p_type) - _hash = pipeline.get_pipeline_hash(p_type) - pipeline_hash = self.get_pipeline_hash(p_type) - if _hash != pipeline_hash: - LOG.info(_LI("Detected change in pipeline configuration.")) - return _hash - return False - - def refresh_pipeline(self): - """Refreshes appropriate pipeline, then delegates to agent.""" - - if cfg.CONF.refresh_pipeline_cfg: - pipeline_hash = self.pipeline_changed() - if pipeline_hash: - try: - # Pipeline in the notification agent. - if hasattr(self, 'pipeline_manager'): - self.pipeline_manager = pipeline.setup_pipeline() - # Polling in the polling agent. - elif hasattr(self, 'polling_manager'): - self.polling_manager = pipeline.setup_polling() - LOG.debug("Pipeline has been refreshed. 
" - "old hash: %(old)s, new hash: %(new)s", - {'old': self.pipeline_hash, - 'new': pipeline_hash}) - self.set_pipeline_hash(pipeline_hash) - self.pipeline_validated = True - except Exception as err: - LOG.debug("Active pipeline config's hash is %s", - self.pipeline_hash) - LOG.exception(_LE('Unable to load changed pipeline: %s') - % err) - - if cfg.CONF.refresh_event_pipeline_cfg: - ev_pipeline_hash = self.pipeline_changed(pipeline.EVENT_TYPE) - if ev_pipeline_hash: - try: - # Pipeline in the notification agent. - if hasattr(self, 'event_pipeline_manager'): - self.event_pipeline_manager = (pipeline. - setup_event_pipeline()) - - LOG.debug("Event Pipeline has been refreshed. " - "old hash: %(old)s, new hash: %(new)s", - {'old': self.event_pipeline_hash, - 'new': ev_pipeline_hash}) - self.set_pipeline_hash(ev_pipeline_hash, - pipeline.EVENT_TYPE) - self.event_pipeline_validated = True - except Exception as err: - LOG.debug("Active event pipeline config's hash is %s", - self.event_pipeline_hash) - LOG.exception(_LE('Unable to load changed event pipeline:' - ' %s') % err) - - if self.pipeline_validated or self.event_pipeline_validated: - self.reload_pipeline() - self.clear_pipeline_validation_status() diff --git a/ceilometer/storage/__init__.py b/ceilometer/storage/__init__.py index 97431a53..14f66db2 100644 --- a/ceilometer/storage/__init__.py +++ b/ceilometer/storage/__init__.py @@ -22,27 +22,15 @@ import retrying import six.moves.urllib.parse as urlparse from stevedore import driver -from ceilometer import utils - LOG = log.getLogger(__name__) OPTS = [ - cfg.IntOpt('metering_time_to_live', - default=-1, - help="Number of seconds that samples are kept " - "in the database for (<= 0 means forever).", - deprecated_opts=[cfg.DeprecatedOpt('time_to_live', - 'database')]), cfg.IntOpt('event_time_to_live', default=-1, help=("Number of seconds that events are kept " "in the database for (<= 0 means forever).")), - cfg.StrOpt('metering_connection', - secret=True, - help='The connection string used to connect to the metering ' - 'database. (if unset, connection is used)'), cfg.StrOpt('event_connection', secret=True, help='The connection string used to connect to the event ' @@ -51,17 +39,6 @@ OPTS = [ cfg.CONF.register_opts(OPTS, group='database') -CLI_OPTS = [ - cfg.BoolOpt('sql-expire-samples-only', - default=False, - help="Indicates if expirer expires only samples. If set true," - " expired samples will be deleted, but residual" - " resource and meter definition data will remain.", - ), -] - -cfg.CONF.register_cli_opts(CLI_OPTS) - db_options.set_defaults(cfg.CONF) @@ -78,87 +55,28 @@ class StorageBadAggregate(Exception): code = 400 -def get_connection_from_config(conf, purpose='metering'): +def get_connection_from_config(conf): retries = conf.database.max_retries # Convert retry_interval secs to msecs for retry decorator @retrying.retry(wait_fixed=conf.database.retry_interval * 1000, stop_max_attempt_number=retries if retries >= 0 else None) def _inner(): - namespace = 'ceilometer.%s.storage' % purpose - url = (getattr(conf.database, '%s_connection' % purpose) or + url = (getattr(conf.database, 'event_connection') or conf.database.connection) - return get_connection(url, namespace) + return get_connection(url) return _inner() -def get_connection(url, namespace): +def get_connection(url): """Return an open connection to the database.""" connection_scheme = urlparse.urlparse(url).scheme # SqlAlchemy connections specify may specify a 'dialect' or # 'dialect+driver'. 
Handle the case where driver is specified. engine_name = connection_scheme.split('+')[0] # NOTE: translation not applied bug #1446983 - LOG.debug('looking for %(name)r driver in %(namespace)r', - {'name': engine_name, 'namespace': namespace}) - mgr = driver.DriverManager(namespace, engine_name) + LOG.debug('looking for %(name)r driver in ceilometer.event.storage', + {'name': engine_name}) + mgr = driver.DriverManager('ceilometer.event.storage', engine_name) return mgr.driver(url) - - -class SampleFilter(object): - """Holds the properties for building a query from a meter/sample filter. - - :param user: The sample owner. - :param project: The sample project. - :param start_timestamp: Earliest time point in the request. - :param start_timestamp_op: Earliest timestamp operation in the request. - :param end_timestamp: Latest time point in the request. - :param end_timestamp_op: Latest timestamp operation in the request. - :param resource: Optional filter for resource id. - :param meter: Optional filter for meter type using the meter name. - :param source: Optional source filter. - :param message_id: Optional sample_id filter. - :param metaquery: Optional filter on the metadata - """ - def __init__(self, user=None, project=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - resource=None, meter=None, - source=None, message_id=None, - metaquery=None): - self.user = user - self.project = project - self.start_timestamp = utils.sanitize_timestamp(start_timestamp) - self.start_timestamp_op = start_timestamp_op - self.end_timestamp = utils.sanitize_timestamp(end_timestamp) - self.end_timestamp_op = end_timestamp_op - self.resource = resource - self.meter = meter - self.source = source - self.metaquery = metaquery or {} - self.message_id = message_id - - def __repr__(self): - return ("" % - (self.user, - self.project, - self.start_timestamp, - self.start_timestamp_op, - self.end_timestamp, - self.end_timestamp_op, - self.resource, - self.meter, - self.source, - self.metaquery, - self.message_id)) diff --git a/ceilometer/storage/base.py b/ceilometer/storage/base.py index e8c4e97e..a6444739 100644 --- a/ceilometer/storage/base.py +++ b/ceilometer/storage/base.py @@ -15,60 +15,7 @@ """Base classes for storage engines """ -import datetime -import inspect -import math - -from oslo_utils import timeutils import six -from six import moves - -import ceilometer - - -def iter_period(start, end, period): - """Split a time from start to end in periods of a number of seconds. - - This function yields the (start, end) time for each period composing the - time passed as argument. - - :param start: When the period set start. - :param end: When the period end starts. - :param period: The duration of the period. - """ - period_start = start - increment = datetime.timedelta(seconds=period) - for i in moves.xrange(int(math.ceil( - timeutils.delta_seconds(start, end) - / float(period)))): - next_start = period_start + increment - yield (period_start, next_start) - period_start = next_start - - -def _handle_sort_key(model_name, sort_key=None): - """Generate sort keys according to the passed in sort key from user. - - :param model_name: Database model name be query.(meter, etc.) - :param sort_key: sort key passed from user. 
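[Editor's note] iter_period() above slices a time range into fixed windows, rounding the count up so a partial trailing period still gets a slot. For example::

    import datetime

    start = datetime.datetime(2016, 1, 1, 0, 0, 0)
    end = datetime.datetime(2016, 1, 1, 0, 2, 30)
    periods = list(iter_period(start, end, 60))
    # -> three (period_start, period_end) pairs; the last one ends at
    #    00:03:00, past 'end', because of the ceil().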
- return: sort keys list - """ - sort_keys_extra = {'meter': ['user_id', 'project_id'], - 'resource': ['user_id', 'project_id', 'timestamp'], - } - - sort_keys = sort_keys_extra[model_name] - if not sort_key: - return sort_keys - # NOTE(Fengqian): We need to put the sort key from user - # in the first place of sort keys list. - try: - sort_keys.remove(sort_key) - except ValueError: - pass - finally: - sort_keys.insert(0, sort_key) - return sort_keys class Model(object): @@ -92,154 +39,3 @@ class Model(object): def __eq__(self, other): return self.as_dict() == other.as_dict() - - @classmethod - def get_field_names(cls): - fields = inspect.getargspec(cls.__init__)[0] - return set(fields) - set(["self"]) - - -class Connection(object): - """Base class for storage system connections.""" - - # A dictionary representing the capabilities of this driver. - CAPABILITIES = { - 'meters': {'query': {'simple': False, - 'metadata': False}}, - 'resources': {'query': {'simple': False, - 'metadata': False}}, - 'samples': {'query': {'simple': False, - 'metadata': False, - 'complex': False}}, - 'statistics': {'groupby': False, - 'query': {'simple': False, - 'metadata': False}, - 'aggregation': {'standard': False, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - STORAGE_CAPABILITIES = { - 'storage': {'production_ready': False}, - } - - def __init__(self, url): - pass - - @staticmethod - def upgrade(): - """Migrate the database to `version` or the most recent version.""" - - @staticmethod - def record_metering_data(data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - - All timestamps must be naive utc datetime object. - """ - raise ceilometer.NotImplementedError( - 'Recording metering data is not implemented') - - @staticmethod - def clear_expired_metering_data(ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - - :param ttl: Number of seconds to keep records for. - """ - raise ceilometer.NotImplementedError( - 'Clearing samples not implemented') - - @staticmethod - def get_resources(user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances. - - Iterable items containing resource information. - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional timestamp start range operation. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional timestamp end range operation. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - raise ceilometer.NotImplementedError('Resources not implemented') - - @staticmethod - def get_meters(user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of model.Meter instances. - - Iterable items containing meter information. - :param user: Optional ID for user that owns the resource. 
- :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - raise ceilometer.NotImplementedError('Meters not implemented') - - @staticmethod - def get_samples(sample_filter, limit=None): - """Return an iterable of model.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - raise ceilometer.NotImplementedError('Samples not implemented') - - @staticmethod - def get_meter_statistics(sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of model.Statistics instances. - - The filter must have a meter value set. - """ - raise ceilometer.NotImplementedError('Statistics not implemented') - - @staticmethod - def clear(): - """Clear database.""" - - @staticmethod - def query_samples(filter_expr=None, orderby=None, limit=None): - """Return an iterable of model.Sample objects. - - :param filter_expr: Filter expression for query. - :param orderby: List of field name and direction pairs for order by. - :param limit: Maximum number of results to return. - """ - - raise ceilometer.NotImplementedError('Complex query for samples ' - 'is not implemented.') - - @classmethod - def get_capabilities(cls): - """Return an dictionary with the capabilities of each driver.""" - return cls.CAPABILITIES - - @classmethod - def get_storage_capabilities(cls): - """Return a dictionary representing the performance capabilities. - - This is needed to evaluate the performance of each driver. - """ - return cls.STORAGE_CAPABILITIES diff --git a/ceilometer/storage/hbase/migration.py b/ceilometer/storage/hbase/migration.py deleted file mode 100644 index 9cc3df93..00000000 --- a/ceilometer/storage/hbase/migration.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""HBase storage backend migrations -""" - -import re - -from ceilometer.storage.hbase import utils as hbase_utils - - -def migrate_resource_table(conn, table): - """Migrate table 'resource' in HBase. 
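[Editor's note] Every query method on the Connection base class above raises ceilometer.NotImplementedError, so a driver overrides only what its backend supports and advertises the rest through CAPABILITIES. A deliberately tiny sketch (the class is hypothetical)::

    class InMemoryConnection(Connection):
        # Hypothetical driver: keep metering messages in a list.

        def __init__(self, url):
            self.data = []

        def record_metering_data(self, data):
            self.data.append(data)

        # Everything not overridden keeps the base behaviour and
        # raises ceilometer.NotImplementedError.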
- - Change qualifiers format from "%s+%s+%s!%s!%s" % - (rts, source, counter_name, counter_type,counter_unit) - in columns with meters f:m_* - to new separator format "%s:%s:%s:%s:%s" % - (rts, source, counter_name, counter_type,counter_unit) - """ - resource_table = conn.table(table) - resource_filter = ("QualifierFilter(=, " - "'regexstring:m_\\d{19}\\+" - "[\\w-\\._]*\\+[\\w-\\._!]')") - gen = resource_table.scan(filter=resource_filter) - for row, data in gen: - columns = [] - updated_columns = dict() - column_prefix = "f:" - for column, value in data.items(): - if column.startswith('f:m_'): - columns.append(column) - parts = column[2:].split("+", 2) - parts.extend(parts.pop(2).split("!")) - column = hbase_utils.prepare_key(*parts) - updated_columns[column_prefix + column] = value - resource_table.put(row, updated_columns) - resource_table.delete(row, columns) - - -def migrate_meter_table(conn, table): - """Migrate table 'meter' in HBase. - - Change row format from "%s_%d_%s" % (counter_name, rts, message_signature) - to new separator format "%s:%s:%s" % (counter_name, rts, message_signature) - """ - meter_table = conn.table(table) - meter_filter = ("RowFilter(=, " - "'regexstring:[\\w\\._-]*_\\d{19}_\\w*')") - gen = meter_table.scan(filter=meter_filter) - for row, data in gen: - parts = row.rsplit('_', 2) - new_row = hbase_utils.prepare_key(*parts) - meter_table.put(new_row, data) - meter_table.delete(row) - - -def migrate_event_table(conn, table): - """Migrate table 'event' in HBase. - - Change row format from ""%d_%s" % timestamp, event_id, - to new separator format "%s:%s" % timestamp, event_id - Also change trait columns from %s+%s % trait.name, trait.dtype - to %s:%s % trait.name, trait.dtype - """ - event_table = conn.table(table) - event_filter = "RowFilter(=, 'regexstring:\\d*_\\w*')" - gen = event_table.scan(filter=event_filter) - trait_pattern = re.compile("f:[\w\-_]*\+\w") - column_prefix = "f:" - for row, data in gen: - row_parts = row.split("_", 1) - update_data = {} - for column, value in data.items(): - if trait_pattern.match(column): - trait_parts = column[2:].rsplit('+', 1) - column = hbase_utils.prepare_key(*trait_parts) - update_data[column_prefix + column] = value - new_row = hbase_utils.prepare_key(*row_parts) - event_table.put(new_row, update_data) - event_table.delete(row) - - -TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table, - 'meter': migrate_meter_table, - 'event': migrate_event_table} - - -def migrate_tables(conn, tables): - if type(tables) is not list: - tables = [tables] - for table in tables: - if table in TABLE_MIGRATION_FUNCS: - TABLE_MIGRATION_FUNCS.get(table)(conn, table) diff --git a/ceilometer/storage/hbase/utils.py b/ceilometer/storage/hbase/utils.py index f151558e..c348684b 100644 --- a/ceilometer/storage/hbase/utils.py +++ b/ceilometer/storage/hbase/utils.py @@ -191,139 +191,6 @@ def make_query(metaquery=None, trait_query=None, **kwargs): return res_q -def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs): - """Return a list of required columns in meter table to be scanned. - - SingleColumnFilter has 'columns' filter that should be used to determine - what columns we are interested in. But if we want to use 'filter' and - 'columns' together we have to include columns we are filtering by - to columns list. 
- - Please see an example: If we make scan with filter - "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')" - and columns ['f:rts'], the output will be always empty - because only 'rts' will be returned and filter will be applied - to this data so 's_test-1' cannot be find. - To make this request correct it should be fixed as follows: - filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')", - columns = ['f:rts','f:s_test-1']} - - :param metaquery: optional metaquery dict - :param need_timestamp: flag, which defines the need for timestamp columns - :param kwargs: key-value pairs to filter on. Key should be a real - column name in db - """ - columns = ['f:message', 'f:recorded_at'] - columns.extend("f:%s" % k for k, v in kwargs.items() - if v is not None) - if metaquery: - columns.extend("f:r_%s" % k for k, v in metaquery.items() - if v is not None) - source = kwargs.get('source') - if source: - columns.append("f:s_%s" % source) - if need_timestamp: - columns.extend(['f:rts', 'f:timestamp']) - return columns - - -def make_sample_query_from_filter(sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - - meter = sample_filter.meter - if not meter and require_meter: - raise RuntimeError('Missing required meter specifier') - start_row, end_row, ts_query = make_timestamp_query( - make_general_rowkey_scan, - start=sample_filter.start_timestamp, - start_op=sample_filter.start_timestamp_op, - end=sample_filter.end_timestamp, - end_op=sample_filter.end_timestamp_op, - some_id=meter) - kwargs = dict(user_id=sample_filter.user, - project_id=sample_filter.project, - counter_name=meter, - resource_id=sample_filter.resource, - source=sample_filter.source, - message_id=sample_filter.message_id) - - q = make_query(metaquery=sample_filter.metaquery, **kwargs) - - if q: - res_q = q + " AND " + ts_query if ts_query else q - else: - res_q = ts_query if ts_query else None - - need_timestamp = (sample_filter.start_timestamp or - sample_filter.end_timestamp) is not None - columns = get_meter_columns(metaquery=sample_filter.metaquery, - need_timestamp=need_timestamp, **kwargs) - return res_q, start_row, end_row, columns - - -def make_meter_query_for_resource(start_timestamp, start_timestamp_op, - end_timestamp, end_timestamp_op, source, - query=None): - """This method is used when Resource table should be filtered by meters. - - In this method we are looking into all qualifiers with m_ prefix. - :param start_timestamp: meter's timestamp start range. - :param start_timestamp_op: meter's start time operator, like ge, gt. - :param end_timestamp: meter's timestamp end range. - :param end_timestamp_op: meter's end time operator, like lt, le. - :param source: source filter. - :param query: a query string to concatenate with. 
- """ - start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp) - mq = [] - start_op = start_timestamp_op or 'ge' - end_op = end_timestamp_op or 'lt' - - if start_rts: - filter_value = (start_rts + ':' + quote(source) if source - else start_rts) - mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value)) - - if end_rts: - filter_value = (end_rts + ':' + quote(source) if source - else end_rts) - mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value)) - - if mq: - meter_q = " AND ".join(mq) - # If there is a filtering on time_range we need to point that - # qualifiers should start with m_. Otherwise in case e.g. - # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808') - # qualifier 's_test' satisfies the filter and will be returned. - meter_q = _QualifierFilter("=", '') + " AND " + meter_q - query = meter_q if not query else query + " AND " + meter_q - return query - - -def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): - """If it's filter on some_id without start and end. - - start_row = some_id while end_row = some_id + MAX_BYTE. - """ - if some_id is None: - return None, None - if not rts_start: - # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123 - # will be quoted and character will be turn in a composition that is - # started with '%' (chr(37)) that lexicographically is less than chr - # of number - rts_start = chr(122) - end_row = prepare_key(some_id, rts_start) - start_row = prepare_key(some_id, rts_end) - - return start_row, end_row - - def prepare_key(*args): """Prepares names for rows and columns with correct separator. @@ -338,16 +205,6 @@ def prepare_key(*args): return ":".join(key_quote) -def timestamp_from_record_tuple(record): - """Extract timestamp from HBase tuple record.""" - return record[0]['timestamp'] - - -def resource_id_from_record_tuple(record): - """Extract resource_id from HBase tuple record.""" - return record[0]['resource_id'] - - def deserialize_entry(entry, get_raw_meta=True): """Return a list of flatten_result, sources, meters and metadata. diff --git a/ceilometer/storage/impl_hbase.py b/ceilometer/storage/impl_hbase.py deleted file mode 100644 index eb3423e2..00000000 --- a/ceilometer/storage/impl_hbase.py +++ /dev/null @@ -1,439 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import datetime -import operator -import time - -from oslo_log import log -from oslo_utils import timeutils - -import ceilometer -from ceilometer.storage import base -from ceilometer.storage.hbase import base as hbase_base -from ceilometer.storage.hbase import migration as hbase_migration -from ceilometer.storage.hbase import utils as hbase_utils -from ceilometer.storage import models -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True}}, - 'statistics': {'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True}}, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -class Connection(hbase_base.Connection, base.Connection): - """Put the metering data into a HBase database - - Collections: - - - meter (describes sample actually): - - - row-key: consists of reversed timestamp, meter and a message uuid - for purposes of uniqueness - - Column Families: - - f: contains the following qualifiers: - - - counter_name: - - counter_type: - - counter_unit: - - counter_volume: - - message: - - message_id: - - message_signature: - - resource_metadata: raw metadata for corresponding resource - of the meter - - project_id: - - resource_id: - - user_id: - - recorded_at: - - flattened metadata with prefix r_metadata. e.g.:: - - f:r_metadata.display_name or f:r_metadata.tag - - - rts: - - timestamp: - - source for meter with prefix 's' - - - resource: - - - row_key: uuid of resource - - Column Families: - - f: contains the following qualifiers: - - - resource_metadata: raw metadata for corresponding resource - - project_id: - - resource_id: - - user_id: - - flattened metadata with prefix r_metadata. e.g.:: - - f:r_metadata.display_name or f:r_metadata.tag - - - sources for all corresponding meters with prefix 's' - - all meters with prefix 'm' for this resource in format: - - .. code-block:: python - - "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type, - counter_unit) - """ - - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - _memory_instance = None - - RESOURCE_TABLE = "resource" - METER_TABLE = "meter" - - def __init__(self, url): - super(Connection, self).__init__(url) - - def upgrade(self): - tables = [self.RESOURCE_TABLE, self.METER_TABLE] - column_families = {'f': dict(max_versions=1)} - with self.conn_pool.connection() as conn: - hbase_utils.create_tables(conn, tables, column_families) - hbase_migration.migrate_tables(conn, tables) - - def clear(self): - LOG.debug('Dropping HBase schema...') - with self.conn_pool.connection() as conn: - for table in [self.RESOURCE_TABLE, - self.METER_TABLE]: - try: - conn.disable_table(table) - except Exception: - LOG.debug('Cannot disable table but ignoring error') - try: - conn.delete_table(table) - except Exception: - LOG.debug('Cannot delete table but ignoring error') - - def record_metering_data(self, data): - """Write the data to the backend storage system. 
- - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - meter_table = conn.table(self.METER_TABLE) - - resource_metadata = data.get('resource_metadata', {}) - # Determine the name of new meter - rts = hbase_utils.timestamp(data['timestamp']) - new_meter = hbase_utils.prepare_key( - rts, data['source'], data['counter_name'], - data['counter_type'], data['counter_unit']) - - # TODO(nprivalova): try not to store resource_id - resource = hbase_utils.serialize_entry(**{ - 'source': data['source'], - 'meter': {new_meter: data['timestamp']}, - 'resource_metadata': resource_metadata, - 'resource_id': data['resource_id'], - 'project_id': data['project_id'], 'user_id': data['user_id']}) - # Here we put entry in HBase with our own timestamp. This is needed - # when samples arrive out-of-order - # If we use timestamp=data['timestamp'] the newest data will be - # automatically 'on the top'. It is needed to keep metadata - # up-to-date: metadata from newest samples is considered as actual. - ts = int(time.mktime(data['timestamp'].timetuple()) * 1000) - resource_table.put(hbase_utils.encode_unicode(data['resource_id']), - resource, ts) - - # Rowkey consists of reversed timestamp, meter and a - # message uuid for purposes of uniqueness - row = hbase_utils.prepare_key(data['counter_name'], rts, - data['message_id']) - record = hbase_utils.serialize_entry( - data, **{'source': data['source'], 'rts': rts, - 'message': data, 'recorded_at': timeutils.utcnow()}) - meter_table.put(row, record) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like ge, gt. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - q = hbase_utils.make_query(metaquery=metaquery, user_id=user, - project_id=project, - resource_id=resource, source=source) - q = hbase_utils.make_meter_query_for_resource(start_timestamp, - start_timestamp_op, - end_timestamp, - end_timestamp_op, - source, q) - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - LOG.debug("Query Resource table: %s", q) - for resource_id, data in resource_table.scan(filter=q, - limit=limit): - f_res, meters, md = hbase_utils.deserialize_entry( - data) - resource_id = hbase_utils.encode_unicode(resource_id) - # Unfortunately happybase doesn't keep ordered result from - # HBase. So that's why it's needed to find min and max - # manually - first_ts = min(meters, key=operator.itemgetter(1))[1] - last_ts = max(meters, key=operator.itemgetter(1))[1] - source = meters[0][0][1] - # If we use QualifierFilter then HBase returns only - # qualifiers filtered by. It will not return the whole entry. 
- # That's why if we need to ask additional qualifiers manually. - if 'project_id' not in f_res and 'user_id' not in f_res: - row = resource_table.row( - resource_id, columns=['f:project_id', 'f:user_id', - 'f:resource_metadata']) - f_res, _m, md = hbase_utils.deserialize_entry(row) - yield models.Resource( - resource_id=resource_id, - first_sample_timestamp=first_ts, - last_sample_timestamp=last_ts, - project_id=f_res['project_id'], - source=source, - user_id=f_res['user_id'], - metadata=md) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - - metaquery = metaquery or {} - - with self.conn_pool.connection() as conn: - resource_table = conn.table(self.RESOURCE_TABLE) - q = hbase_utils.make_query(metaquery=metaquery, user_id=user, - project_id=project, - resource_id=resource, - source=source) - LOG.debug("Query Resource table: %s", q) - - gen = resource_table.scan(filter=q) - # We need result set to be sure that user doesn't receive several - # same meters. Please see bug - # https://bugs.launchpad.net/ceilometer/+bug/1301371 - result = set() - for ignored, data in gen: - flatten_result, meters, md = hbase_utils.deserialize_entry( - data) - for m in meters: - if limit and len(result) >= limit: - return - _m_rts, m_source, name, m_type, unit = m[0] - if unique: - meter_dict = {'name': name, - 'type': m_type, - 'unit': unit, - 'resource_id': None, - 'project_id': None, - 'user_id': None, - 'source': None} - else: - meter_dict = {'name': name, - 'type': m_type, - 'unit': unit, - 'resource_id': - flatten_result['resource_id'], - 'project_id': - flatten_result['project_id'], - 'user_id': - flatten_result['user_id']} - - frozen_meter = frozenset(meter_dict.items()) - if frozen_meter in result: - continue - result.add(frozen_meter) - if not unique: - meter_dict.update({'source': m_source - if m_source else None}) - - yield models.Meter(**meter_dict) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of models.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - with self.conn_pool.connection() as conn: - meter_table = conn.table(self.METER_TABLE) - q, start, stop, columns = (hbase_utils. 
- make_sample_query_from_filter - (sample_filter, require_meter=False)) - LOG.debug("Query Meter Table: %s", q) - gen = meter_table.scan(filter=q, row_start=start, row_stop=stop, - limit=limit, columns=columns) - for ignored, meter in gen: - d_meter = hbase_utils.deserialize_entry(meter)[0] - d_meter['message']['counter_volume'] = ( - float(d_meter['message']['counter_volume'])) - d_meter['message']['recorded_at'] = d_meter['recorded_at'] - yield models.Sample(**d_meter['message']) - - @staticmethod - def _update_meter_stats(stat, meter): - """Do the stats calculation on a requested time bucket in stats dict - - :param stats: dict where aggregated stats are kept - :param index: time bucket index in stats - :param meter: meter record as returned from HBase - :param start_time: query start time - :param period: length of the time bucket - """ - vol = meter['counter_volume'] - ts = meter['timestamp'] - stat.unit = meter['counter_unit'] - stat.min = min(vol, stat.min or vol) - stat.max = max(vol, stat.max) - stat.sum = vol + (stat.sum or 0) - stat.count += 1 - stat.avg = (stat.sum / float(stat.count)) - stat.duration_start = min(ts, stat.duration_start or ts) - stat.duration_end = max(ts, stat.duration_end or ts) - stat.duration = (timeutils.delta_seconds(stat.duration_start, - stat.duration_end)) - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of models.Statistics instances. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - - .. note:: - - Due to HBase limitations the aggregations are implemented - in the driver itself, therefore this method will be quite slow - because of all the Thrift traffic it is going to create. - """ - if groupby: - raise ceilometer.NotImplementedError("Group by not implemented.") - - if aggregate: - raise ceilometer.NotImplementedError( - 'Selectable aggregates not implemented') - - with self.conn_pool.connection() as conn: - meter_table = conn.table(self.METER_TABLE) - q, start, stop, columns = (hbase_utils. 
- make_sample_query_from_filter - (sample_filter)) - # These fields are used in statistics' calculating - columns.extend(['f:timestamp', 'f:counter_volume', - 'f:counter_unit']) - meters = map(hbase_utils.deserialize_entry, - list(meter for (ignored, meter) in - meter_table.scan( - filter=q, row_start=start, - row_stop=stop, columns=columns))) - - if sample_filter.start_timestamp: - start_time = sample_filter.start_timestamp - elif meters: - start_time = meters[-1][0]['timestamp'] - else: - start_time = None - - if sample_filter.end_timestamp: - end_time = sample_filter.end_timestamp - elif meters: - end_time = meters[0][0]['timestamp'] - else: - end_time = None - - results = [] - - if not period: - period = 0 - period_start = start_time - period_end = end_time - - # As our HBase meters are stored as newest-first, we need to iterate - # in the reverse order - for meter in meters[::-1]: - ts = meter[0]['timestamp'] - if period: - offset = int(timeutils.delta_seconds( - start_time, ts) / period) * period - period_start = start_time + datetime.timedelta(0, offset) - - if not results or not results[-1].period_start == period_start: - if period: - period_end = period_start + datetime.timedelta( - 0, period) - results.append( - models.Statistics(unit='', - count=0, - min=0, - max=0, - avg=0, - sum=0, - period=period, - period_start=period_start, - period_end=period_end, - duration=None, - duration_start=None, - duration_end=None, - groupby=None) - ) - self._update_meter_stats(results[-1], meter[0]) - return results diff --git a/ceilometer/storage/impl_log.py b/ceilometer/storage/impl_log.py deleted file mode 100644 index ff52862e..00000000 --- a/ceilometer/storage/impl_log.py +++ /dev/null @@ -1,131 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Simple logging storage backend. -""" - -from oslo_log import log - -from ceilometer.i18n import _LI -from ceilometer.storage import base - -LOG = log.getLogger(__name__) - - -class Connection(base.Connection): - """Log the data.""" - - def upgrade(self): - pass - - def clear(self): - pass - - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.meter.meter_message_from_counter. - """ - LOG.info(_LI('metering data %(counter_name)s for %(resource_id)s: ' - '%(counter_volume)s') - % ({'counter_name': data['counter_name'], - 'resource_id': data['resource_id'], - 'counter_volume': data['counter_volume']})) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - :param ttl: Number of seconds to keep records for. 
- """ - LOG.info(_LI("Dropping metering data with TTL %d"), ttl) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of dictionaries containing resource information. - - { 'resource_id': UUID of the resource, - 'project_id': UUID of project owning the resource, - 'user_id': UUID of user owning the resource, - 'timestamp': UTC datetime of last update to the resource, - 'metadata': most current metadata for the resource, - 'meter': list of the meters reporting data for the resource, - } - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - return [] - - def get_meters(self, user=None, project=None, resource=None, source=None, - limit=None, metaquery=None, unique=False): - """Return an iterable of dictionaries containing meter information. - - { 'name': name of the meter, - 'type': type of the meter (gauge, delta, cumulative), - 'resource_id': UUID of the resource, - 'project_id': UUID of project owning the resource, - 'user_id': UUID of user owning the resource, - } - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param limit: Maximum number of results to return. - :param metaquery: Optional dict with metadata to match on. - :param unique: If set to true, return only unique meter information. - """ - return [] - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of samples. - - Items are created by - ceilometer.publisher.utils.meter_message_from_counter. - """ - return [] - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return a dictionary containing meter statistics. - - Meter statistics is described by the query parameters. - The filter must have a meter value set. - - { 'min': - 'max': - 'avg': - 'sum': - 'count': - 'period': - 'period_start': - 'period_end': - 'duration': - 'duration_start': - 'duration_end': - } - """ - return [] diff --git a/ceilometer/storage/impl_mongodb.py b/ceilometer/storage/impl_mongodb.py deleted file mode 100644 index afe07df1..00000000 --- a/ceilometer/storage/impl_mongodb.py +++ /dev/null @@ -1,679 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# Copyright 2014 Red Hat, Inc -# -# Authors: Doug Hellmann -# Julien Danjou -# Eoghan Glynn -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""MongoDB storage backend""" - -import copy -import datetime -import uuid - -import bson.code -import bson.objectid -from oslo_config import cfg -from oslo_log import log -from oslo_utils import timeutils -import pymongo -import six - -import ceilometer -from ceilometer.i18n import _ -from ceilometer import storage -from ceilometer.storage import base -from ceilometer.storage import models -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer.storage import pymongo_base -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -AVAILABLE_CAPABILITIES = { - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': {'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}}} -} - - -class Connection(pymongo_base.Connection): - """Put the data into a MongoDB database - - Collections:: - - - meter - - the raw incoming data - - resource - - the metadata for resources - - { _id: uuid of resource, - metadata: metadata dictionaries - user_id: uuid - project_id: uuid - meter: [ array of {counter_name: string, counter_type: string, - counter_unit: string} ] - } - """ - - CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - CONNECTION_POOL = pymongo_utils.ConnectionPool() - - STANDARD_AGGREGATES = dict([(a.name, a) for a in [ - pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION, - pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION, - pymongo_utils.COUNT_AGGREGATION, - ]]) - - AGGREGATES = dict([(a.name, a) for a in [ - pymongo_utils.SUM_AGGREGATION, - pymongo_utils.AVG_AGGREGATION, - pymongo_utils.MIN_AGGREGATION, - pymongo_utils.MAX_AGGREGATION, - pymongo_utils.COUNT_AGGREGATION, - pymongo_utils.STDDEV_AGGREGATION, - pymongo_utils.CARDINALITY_AGGREGATION, - ]]) - - SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'), - 'asc': (pymongo.ASCENDING, '$gt')} - - MAP_RESOURCES = bson.code.Code(""" - function () { - emit(this.resource_id, - {user_id: this.user_id, - project_id: this.project_id, - source: this.source, - first_timestamp: this.timestamp, - last_timestamp: this.timestamp, - metadata: this.resource_metadata}) - }""") - - REDUCE_RESOURCES = bson.code.Code(""" - function (key, values) { - var merge = {user_id: values[0].user_id, - project_id: values[0].project_id, - source: values[0].source, - first_timestamp: values[0].first_timestamp, - last_timestamp: values[0].last_timestamp, - metadata: values[0].metadata} - values.forEach(function(value) { - if (merge.first_timestamp - value.first_timestamp > 0) { - merge.first_timestamp = value.first_timestamp; - merge.user_id = value.user_id; - merge.project_id = value.project_id; - merge.source = value.source; - } else if (merge.last_timestamp - value.last_timestamp <= 0) { - merge.last_timestamp = value.last_timestamp; - merge.metadata = value.metadata; - } - }); - return merge; - }""") - - _GENESIS = datetime.datetime(year=datetime.MINYEAR, 
month=1, day=1) - _APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31, - hour=23, minute=59, second=59) - - def __init__(self, url): - - # NOTE(jd) Use our own connection pooling on top of the Pymongo one. - # We need that otherwise we overflow the MongoDB instance with new - # connection since we instantiate a Pymongo client each time someone - # requires a new storage connection. - self.conn = self.CONNECTION_POOL.connect(url) - self.version = self.conn.server_info()['versionArray'] - # Require MongoDB 2.4 to use $setOnInsert - if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION: - raise storage.StorageBadVersion( - "Need at least MongoDB %s" % - pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION) - - connection_options = pymongo.uri_parser.parse_uri(url) - self.db = getattr(self.conn, connection_options['database']) - if connection_options.get('username'): - self.db.authenticate(connection_options['username'], - connection_options['password']) - - # NOTE(jd) Upgrading is just about creating index, so let's do this - # on connection to be sure at least the TTL is correctly updated if - # needed. - self.upgrade() - - @staticmethod - def update_ttl(ttl, ttl_index_name, index_field, coll): - """Update or create time_to_live indexes. - - :param ttl: time to live in seconds. - :param ttl_index_name: name of the index we want to update or create. - :param index_field: field with the index that we need to update. - :param coll: collection which indexes need to be updated. - """ - indexes = coll.index_information() - if ttl <= 0: - if ttl_index_name in indexes: - coll.drop_index(ttl_index_name) - return - - if ttl_index_name in indexes: - return coll.database.command( - 'collMod', coll.name, - index={'keyPattern': {index_field: pymongo.ASCENDING}, - 'expireAfterSeconds': ttl}) - - coll.create_index([(index_field, pymongo.ASCENDING)], - expireAfterSeconds=ttl, - name=ttl_index_name) - - def upgrade(self): - # Establish indexes - # - # We need variations for user_id vs. project_id because of the - # way the indexes are stored in b-trees. The user_id and - # project_id values are usually mutually exclusive in the - # queries, so the database won't take advantage of an index - # including both. 
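The update_ttl() helper above drives MongoDB's native index-based expiry: a non-positive TTL drops the index, an existing index is retuned in place via collMod, and otherwise a fresh TTL index is created. The same decision tree as a standalone pymongo sketch, assuming a local mongod and an illustrative meter collection:

.. code-block:: python

    import pymongo

    client = pymongo.MongoClient('mongodb://localhost:27017')  # assumed URL
    meter = client.demo.meter  # illustrative database/collection
    TTL_INDEX = 'meter_ttl'

    def set_ttl(ttl):
        indexes = meter.index_information()
        if ttl <= 0:
            # A non-positive TTL disables expiry altogether.
            if TTL_INDEX in indexes:
                meter.drop_index(TTL_INDEX)
            return
        if TTL_INDEX in indexes:
            # collMod retunes expireAfterSeconds in place, with no rebuild.
            meter.database.command(
                'collMod', meter.name,
                index={'keyPattern': {'timestamp': pymongo.ASCENDING},
                       'expireAfterSeconds': ttl})
            return
        meter.create_index([('timestamp', pymongo.ASCENDING)],
                           expireAfterSeconds=ttl, name=TTL_INDEX)

    set_ttl(86400)  # keep samples for one day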
- - # create collection if not present - if 'resource' not in self.db.conn.collection_names(): - self.db.conn.create_collection('resource') - if 'meter' not in self.db.conn.collection_names(): - self.db.conn.create_collection('meter') - - name_qualifier = dict(user_id='', project_id='project_') - background = dict(user_id=False, project_id=True) - for primary in ['user_id', 'project_id']: - name = 'meter_%sidx' % name_qualifier[primary] - self.db.meter.create_index([ - ('resource_id', pymongo.ASCENDING), - (primary, pymongo.ASCENDING), - ('counter_name', pymongo.ASCENDING), - ('timestamp', pymongo.ASCENDING), - ], name=name, background=background[primary]) - - self.db.meter.create_index([('timestamp', pymongo.DESCENDING)], - name='timestamp_idx') - - # NOTE(ityaptin) This index covers get_resource requests sorting - # and MongoDB uses part of this compound index for different - # queries based on any of user_id, project_id, last_sample_timestamp - # fields - self.db.resource.create_index([('user_id', pymongo.DESCENDING), - ('project_id', pymongo.DESCENDING), - ('last_sample_timestamp', - pymongo.DESCENDING)], - name='resource_user_project_timestamp',) - self.db.resource.create_index([('last_sample_timestamp', - pymongo.DESCENDING)], - name='last_sample_timestamp_idx') - - # update or create time_to_live index - ttl = cfg.CONF.database.metering_time_to_live - self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter) - self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp', - self.db.resource) - - def clear(self): - self.conn.drop_database(self.db.name) - # Connection will be reopened automatically if needed - self.conn.close() - - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - # Record the updated resource metadata - we use $setOnInsert to - # unconditionally insert sample timestamps and resource metadata - # (in the update case, this must be conditional on the sample not - # being out-of-order) - data = copy.deepcopy(data) - data['resource_metadata'] = pymongo_utils.improve_keys( - data.pop('resource_metadata')) - resource = self.db.resource.find_one_and_update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - 'source': data['source'], - }, - '$setOnInsert': {'metadata': data['resource_metadata'], - 'first_sample_timestamp': data['timestamp'], - 'last_sample_timestamp': data['timestamp'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - 'counter_unit': data['counter_unit'], - }, - }, - }, - upsert=True, - return_document=pymongo.ReturnDocument.AFTER, - ) - - # only update last sample timestamp if actually later (the usual - # in-order case) - last_sample_timestamp = resource.get('last_sample_timestamp') - if (last_sample_timestamp is None or - last_sample_timestamp <= data['timestamp']): - self.db.resource.update_one( - {'_id': data['resource_id']}, - {'$set': {'metadata': data['resource_metadata'], - 'last_sample_timestamp': data['timestamp']}} - ) - - # only update first sample timestamp if actually earlier (the unusual - # out-of-order case) - # NOTE: a null first sample timestamp is not updated as this indicates - # a pre-existing resource document dating from before we started - # recording these timestamps in the resource collection - first_sample_timestamp = resource.get('first_sample_timestamp') - 
if (first_sample_timestamp is not None and - first_sample_timestamp > data['timestamp']): - self.db.resource.update_one( - {'_id': data['resource_id']}, - {'$set': {'first_sample_timestamp': data['timestamp']}} - ) - - # Record the raw data for the meter. Use a copy so we do not - # modify a data structure owned by our caller (the driver adds - # a new key '_id'). - record = copy.copy(data) - record['recorded_at'] = timeutils.utcnow() - - self.db.meter.insert_one(record) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs with native MongoDB time-to-live feature. - """ - LOG.debug("Clearing expired metering data is based on native " - "MongoDB time to live feature and going in background.") - - @classmethod - def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'): - """Returns a sort_instruction and paging operator. - - Sort instructions are used in the query to determine what attributes - to sort on and what direction to use. - :param q: The query dict passed in. - :param sort_keys: array of attributes by which results be sorted. - :param sort_dir: direction in which results be sorted (asc, desc). - :return: sort instructions and paging operator - """ - sort_keys = sort_keys or [] - sort_instructions = [] - _sort_dir, operation = cls.SORT_OPERATION_MAPPING.get( - sort_dir, cls.SORT_OPERATION_MAPPING['desc']) - - for _sort_key in sort_keys: - _instruction = (_sort_key, _sort_dir) - sort_instructions.append(_instruction) - - return sort_instructions, operation - - def _get_time_constrained_resources(self, query, - start_timestamp, start_timestamp_op, - end_timestamp, end_timestamp_op, - metaquery, resource, limit): - """Return an iterable of models.Resource instances - - Items are constrained by sample timestamp. - :param query: project/user/source query - :param start_timestamp: modified timestamp start range. - :param start_timestamp_op: start time operator, like gt, ge. - :param end_timestamp: modified timestamp end range. - :param end_timestamp_op: end time operator, like lt, le. - :param metaquery: dict with metadata to match on. - :param resource: resource filter. - """ - if resource is not None: - query['resource_id'] = resource - - # Add resource_ prefix so it matches the field in the db - query.update(dict(('resource_' + k, v) - for (k, v) in six.iteritems(metaquery))) - - # FIXME(dhellmann): This may not perform very well, - # but doing any better will require changing the database - # schema and that will need more thought than I have time - # to put into it today. - # Look for resources matching the above criteria and with - # samples in the time range we care about, then change the - # resource query to return just those resources by id. 
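The record_metering_data() method above leans on three update operators to keep one resource document per resource_id correct even when samples arrive out of order. A condensed pymongo sketch of the same upsert, with made-up sample values:

.. code-block:: python

    import datetime

    import pymongo

    client = pymongo.MongoClient('mongodb://localhost:27017')  # assumed URL
    resource = client.demo.resource  # illustrative database/collection
    now = datetime.datetime.utcnow()

    # One document per resource_id. $setOnInsert fires only when the upsert
    # creates the document, so the first/last sample timestamps are seeded
    # exactly once; $addToSet keeps the per-resource meter list free of
    # duplicates.
    doc = resource.find_one_and_update(
        {'_id': 'instance-0001'},
        {'$set': {'project_id': 'p1', 'user_id': 'u1',
                  'source': 'openstack'},
         '$setOnInsert': {'first_sample_timestamp': now,
                          'last_sample_timestamp': now,
                          'metadata': {'display_name': 'vm1'}},
         '$addToSet': {'meter': {'counter_name': 'cpu_util',
                                 'counter_type': 'gauge',
                                 'counter_unit': '%'}}},
        upsert=True,
        return_document=pymongo.ReturnDocument.AFTER)

    # In the usual in-order case the driver then advanced the last sample
    # timestamp (and metadata) in a second, conditional update.
    last = doc.get('last_sample_timestamp')
    if last is None or last <= now:
        resource.update_one({'_id': 'instance-0001'},
                            {'$set': {'last_sample_timestamp': now}})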
- ts_range = pymongo_utils.make_timestamp_range(start_timestamp, - end_timestamp, - start_timestamp_op, - end_timestamp_op) - if ts_range: - query['timestamp'] = ts_range - - sort_keys = base._handle_sort_key('resource') - sort_instructions = self._build_sort_instructions(sort_keys)[0] - - # use a unique collection name for the results collection, - # as result post-sorting (as oppposed to reduce pre-sorting) - # is not possible on an inline M-R - out = 'resource_list_%s' % uuid.uuid4() - self.db.meter.map_reduce(self.MAP_RESOURCES, - self.REDUCE_RESOURCES, - out=out, - sort={'resource_id': 1}, - query=query) - - try: - if limit is not None: - results = self.db[out].find(sort=sort_instructions, - limit=limit) - else: - results = self.db[out].find(sort=sort_instructions) - for r in results: - resource = r['value'] - yield models.Resource( - resource_id=r['_id'], - user_id=resource['user_id'], - project_id=resource['project_id'], - first_sample_timestamp=resource['first_timestamp'], - last_sample_timestamp=resource['last_timestamp'], - source=resource['source'], - metadata=pymongo_utils.unquote_keys(resource['metadata'])) - finally: - self.db[out].drop() - - def _get_floating_resources(self, query, metaquery, resource, limit): - """Return an iterable of models.Resource instances - - Items are unconstrained by timestamp. - :param query: project/user/source query - :param metaquery: dict with metadata to match on. - :param resource: resource filter. - """ - if resource is not None: - query['_id'] = resource - - query.update(dict((k, v) - for (k, v) in six.iteritems(metaquery))) - - keys = base._handle_sort_key('resource') - sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i - for i in keys] - sort_instructions = self._build_sort_instructions(sort_keys)[0] - - if limit is not None: - results = self.db.resource.find(query, sort=sort_instructions, - limit=limit) - else: - results = self.db.resource.find(query, sort=sort_instructions) - - for r in results: - yield models.Resource( - resource_id=r['_id'], - user_id=r['user_id'], - project_id=r['project_id'], - first_sample_timestamp=r.get('first_sample_timestamp', - self._GENESIS), - last_sample_timestamp=r.get('last_sample_timestamp', - self._APOCALYPSE), - source=r['source'], - metadata=pymongo_utils.unquote_keys(r['metadata'])) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. 
- """ - if limit == 0: - return - metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} - - query = {} - if user is not None: - query['user_id'] = user - if project is not None: - query['project_id'] = project - if source is not None: - query['source'] = source - - if start_timestamp or end_timestamp: - return self._get_time_constrained_resources(query, - start_timestamp, - start_timestamp_op, - end_timestamp, - end_timestamp_op, - metaquery, resource, - limit) - else: - return self._get_floating_resources(query, metaquery, resource, - limit) - - @staticmethod - def _make_period_dict(period, first_ts): - """Create a period field for _id of grouped fields. - - :param period: Period duration in seconds - :param first_ts: First timestamp for first period - :return: - """ - if period >= 0: - period_unique_dict = { - "period_start": - { - "$divide": [ - {"$subtract": [ - {"$subtract": ["$timestamp", - first_ts]}, - {"$mod": [{"$subtract": ["$timestamp", - first_ts]}, - period * 1000] - } - ]}, - period * 1000 - ] - } - - } - else: - # Note(ityaptin) Hack for older MongoDB versions (2.4.+ and older). - # Since 2.6+ we could use $literal operator - period_unique_dict = {"$period_start": {"$add": [0, 0]}} - return period_unique_dict - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of models.Statistics instance. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - """ - # NOTE(zqfan): We already have checked at API level, but - # still leave it here in case of directly storage calls. - if aggregate: - for a in aggregate: - if a.func not in self.AGGREGATES: - msg = _('Invalid aggregation function: %s') % a.func - raise storage.StorageBadAggregate(msg) - - if (groupby and set(groupby) - - set(['user_id', 'project_id', 'resource_id', 'source', - 'resource_metadata.instance_type'])): - raise ceilometer.NotImplementedError( - "Unable to group by these fields") - q = pymongo_utils.make_query_from_filter(sample_filter) - - group_stage = {} - project_stage = { - "unit": "$_id.unit", - "name": "$_id.name", - "first_timestamp": "$first_timestamp", - "last_timestamp": "$last_timestamp", - "period_start": "$_id.period_start", - } - - # Add timestamps to $group stage - group_stage.update({"first_timestamp": {"$min": "$timestamp"}, - "last_timestamp": {"$max": "$timestamp"}}) - - # Define a _id field for grouped documents - unique_group_field = {"name": "$counter_name", - "unit": "$counter_unit"} - - # Define a first timestamp for periods - if sample_filter.start_timestamp: - first_timestamp = sample_filter.start_timestamp - else: - first_timestamp_cursor = self.db.meter.find( - limit=1, sort=[('timestamp', - pymongo.ASCENDING)]) - if first_timestamp_cursor.count(): - first_timestamp = first_timestamp_cursor[0]['timestamp'] - else: - first_timestamp = utils.EPOCH_TIME - - # Add a start_period field to unique identifier of grouped documents - if period: - period_dict = self._make_period_dict(period, - first_timestamp) - unique_group_field.update(period_dict) - - # Add a groupby fields to unique identifier of grouped documents - if groupby: - unique_group_field.update(dict((field.replace(".", "/"), - "$%s" % field) - for field in groupby)) - - group_stage.update({"_id": unique_group_field}) - - self._compile_aggregate_stages(aggregate, group_stage, project_stage) - - # Aggregation stages list. It's work one by one and uses documents - # from previous stages. 
- aggregation_query = [{'$match': q}, - {"$sort": {"timestamp": 1}}, - {"$group": group_stage}, - {"$sort": {"_id.period_start": 1}}, - {"$project": project_stage}] - - # results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0 - results = self.db.meter.aggregate(aggregation_query, - **self._make_aggregation_params()) - return [self._stats_result_to_model(point, groupby, aggregate, - period, first_timestamp) - for point in self._get_results(results)] - - def _stats_result_aggregates(self, result, aggregate): - stats_args = {} - for attr, func in Connection.STANDARD_AGGREGATES.items(): - if attr in result: - stats_args.update(func.finalize(result, - version_array=self.version)) - - if aggregate: - stats_args['aggregate'] = {} - for agr in aggregate: - stats_args['aggregate'].update( - Connection.AGGREGATES[agr.func].finalize( - result, agr.param, self.version)) - return stats_args - - def _stats_result_to_model(self, result, groupby, aggregate, period, - first_timestamp): - if period is None: - period = 0 - first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp) - stats_args = self._stats_result_aggregates(result, aggregate) - - stats_args['unit'] = result['unit'] - stats_args['duration'] = (result["last_timestamp"] - - result["first_timestamp"]).total_seconds() - stats_args['duration_start'] = result['first_timestamp'] - stats_args['duration_end'] = result['last_timestamp'] - stats_args['period'] = period - start = result.get("period_start", 0) * period - - stats_args['period_start'] = (first_timestamp + - datetime.timedelta(seconds=start)) - stats_args['period_end'] = (first_timestamp + - datetime.timedelta(seconds=start + period) - if period else result['last_timestamp']) - - stats_args['groupby'] = ( - dict((g, result['_id'].get(g.replace(".", "/"))) - for g in groupby) if groupby else None) - return models.Statistics(**stats_args) - - def _compile_aggregate_stages(self, aggregate, group_stage, project_stage): - if not aggregate: - for aggregation in Connection.STANDARD_AGGREGATES.values(): - group_stage.update( - aggregation.group(version_array=self.version) - ) - project_stage.update( - aggregation.project( - version_array=self.version - ) - ) - else: - for description in aggregate: - aggregation = Connection.AGGREGATES.get(description.func) - if aggregation: - if not aggregation.validate(description.param): - raise storage.StorageBadAggregate( - 'Bad aggregate: %s.%s' % (description.func, - description.param)) - group_stage.update( - aggregation.group(description.param, - version_array=self.version) - ) - project_stage.update( - aggregation.project(description.param, - version_array=self.version) - ) - - @staticmethod - def _get_results(results): - if isinstance(results, dict): - return results.get('result', []) - else: - return results - - def _make_aggregation_params(self): - if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION: - return {"allowDiskUse": True} - return {} diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py deleted file mode 100644 index 2b6c9af9..00000000 --- a/ceilometer/storage/impl_sqlalchemy.py +++ /dev/null @@ -1,822 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""SQLAlchemy storage backend.""" - -from __future__ import absolute_import -import datetime -import hashlib -import os - -from oslo_config import cfg -from oslo_db import api -from oslo_db import exception as dbexc -from oslo_db.sqlalchemy import session as db_session -from oslo_log import log -from oslo_serialization import jsonutils -from oslo_utils import timeutils -import six -import sqlalchemy as sa -from sqlalchemy import and_ -from sqlalchemy import distinct -from sqlalchemy import func -from sqlalchemy.orm import aliased -from sqlalchemy.sql.expression import cast - -import ceilometer -from ceilometer.i18n import _, _LI -from ceilometer import storage -from ceilometer.storage import base -from ceilometer.storage import models as api_models -from ceilometer.storage.sqlalchemy import models -from ceilometer.storage.sqlalchemy import utils as sql_utils -from ceilometer import utils - -LOG = log.getLogger(__name__) - - -STANDARD_AGGREGATES = dict( - avg=func.avg(models.Sample.volume).label('avg'), - sum=func.sum(models.Sample.volume).label('sum'), - min=func.min(models.Sample.volume).label('min'), - max=func.max(models.Sample.volume).label('max'), - count=func.count(models.Sample.volume).label('count') -) - -UNPARAMETERIZED_AGGREGATES = dict( - stddev=func.stddev_pop(models.Sample.volume).label('stddev') -) - -PARAMETERIZED_AGGREGATES = dict( - validate=dict( - cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id'] - ), - compute=dict( - cardinality=lambda p: func.count( - distinct(getattr(models.Resource, p)) - ).label('cardinality/%s' % p) - ) -) - -AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -def apply_metaquery_filter(session, query, metaquery): - """Apply provided metaquery filter to existing query. - - :param session: session used for original query - :param query: Query instance - :param metaquery: dict with metadata to match on. - """ - for k, value in six.iteritems(metaquery): - key = k[9:] # strip out 'metadata.' prefix - try: - _model = sql_utils.META_TYPE_MAP[type(value)] - except KeyError: - raise ceilometer.NotImplementedError( - 'Query on %(key)s is of %(value)s ' - 'type and is not supported' % - {"key": k, "value": type(value)}) - else: - meta_alias = aliased(_model) - on_clause = and_(models.Resource.internal_id == meta_alias.id, - meta_alias.meta_key == key) - # outer join is needed to support metaquery - # with or operator on non existent metadata field - # see: test_query_non_existing_metadata_with_result - # test case. 
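As the comment above notes, apply_metaquery_filter() joins one alias of the typed metadata table per metaquery key, and the join has to be an outer join so a complex query combined with `or` can still match rows that lack the key. The join shape, reduced to a SQLAlchemy Core sketch over an invented two-table schema (not the driver's real models):

.. code-block:: python

    import sqlalchemy as sa

    # Invented stand-in for the driver's models: resources plus one typed
    # key/value metadata table keyed by the resource internal id.
    md = sa.MetaData()
    resource = sa.Table(
        'resource', md,
        sa.Column('internal_id', sa.Integer, primary_key=True))
    meta_text = sa.Table(
        'metadata_text', md,
        sa.Column('id', sa.Integer),
        sa.Column('meta_key', sa.String(255)),
        sa.Column('value', sa.String(255)))

    # One alias per metaquery key; the LEFT OUTER JOIN keeps rows that lack
    # the key, so an or-combined complex query can still match them.
    meta_alias = meta_text.alias()
    query = (sa.select([resource])
             .select_from(resource.outerjoin(
                 meta_alias,
                 sa.and_(resource.c.internal_id == meta_alias.c.id,
                         meta_alias.c.meta_key == 'display_name')))
             .where(meta_alias.c.value == 'vm1'))

    print(query)  # renders the join shape the driver built per key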
- query = query.outerjoin(meta_alias, on_clause) - query = query.filter(meta_alias.value == value) - - return query - - -def make_query_from_filter(session, query, sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param session: session used for original query - :param query: Query instance - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - - if sample_filter.meter: - query = query.filter(models.Meter.name == sample_filter.meter) - elif require_meter: - raise RuntimeError('Missing required meter specifier') - if sample_filter.source: - query = query.filter( - models.Resource.source_id == sample_filter.source) - if sample_filter.start_timestamp: - ts_start = sample_filter.start_timestamp - if sample_filter.start_timestamp_op == 'gt': - query = query.filter(models.Sample.timestamp > ts_start) - else: - query = query.filter(models.Sample.timestamp >= ts_start) - if sample_filter.end_timestamp: - ts_end = sample_filter.end_timestamp - if sample_filter.end_timestamp_op == 'le': - query = query.filter(models.Sample.timestamp <= ts_end) - else: - query = query.filter(models.Sample.timestamp < ts_end) - if sample_filter.user: - if sample_filter.user == 'None': - sample_filter.user = None - query = query.filter(models.Resource.user_id == sample_filter.user) - if sample_filter.project: - if sample_filter.project == 'None': - sample_filter.project = None - query = query.filter( - models.Resource.project_id == sample_filter.project) - if sample_filter.resource: - query = query.filter( - models.Resource.resource_id == sample_filter.resource) - if sample_filter.message_id: - query = query.filter( - models.Sample.message_id == sample_filter.message_id) - - if sample_filter.metaquery: - query = apply_metaquery_filter(session, query, - sample_filter.metaquery) - - return query - - -class Connection(base.Connection): - """Put the data into a SQLAlchemy database. 
- - Tables:: - - - meter - - meter definition - - { id: meter id - name: meter name - type: meter type - unit: meter unit - } - - resource - - resource definition - - { internal_id: resource id - resource_id: resource uuid - user_id: user uuid - project_id: project uuid - source_id: source id - resource_metadata: metadata dictionary - metadata_hash: metadata dictionary hash - } - - sample - - the raw incoming data - - { id: sample id - meter_id: meter id (->meter.id) - resource_id: resource id (->resource.internal_id) - volume: sample volume - timestamp: datetime - recorded_at: datetime - message_signature: message signature - message_id: message uuid - } - """ - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - AVAILABLE_CAPABILITIES) - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - - def __init__(self, url): - # Set max_retries to 0, since oslo.db in certain cases may attempt - # to retry making the db connection retried max_retries ^ 2 times - # in failure case and db reconnection has already been implemented - # in storage.__init__.get_connection_from_config function - options = dict(cfg.CONF.database.items()) - options['max_retries'] = 0 - # oslo.db doesn't support options defined by Ceilometer - for opt in storage.OPTS: - options.pop(opt.name, None) - self._engine_facade = db_session.EngineFacade(url, **options) - - def upgrade(self): - # NOTE(gordc): to minimise memory, only import migration when needed - from oslo_db.sqlalchemy import migration - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'sqlalchemy', 'migrate_repo') - migration.db_sync(self._engine_facade.get_engine(), path) - - def clear(self): - engine = self._engine_facade.get_engine() - for table in reversed(models.Base.metadata.sorted_tables): - engine.execute(table.delete()) - engine.dispose() - - @staticmethod - def _create_meter(conn, name, type, unit): - # TODO(gordc): implement lru_cache to improve performance - try: - meter = models.Meter.__table__ - trans = conn.begin_nested() - if conn.dialect.name == 'sqlite': - trans = conn.begin() - with trans: - meter_row = conn.execute( - sa.select([meter.c.id]) - .where(sa.and_(meter.c.name == name, - meter.c.type == type, - meter.c.unit == unit))).first() - meter_id = meter_row[0] if meter_row else None - if meter_id is None: - result = conn.execute(meter.insert(), name=name, - type=type, unit=unit) - meter_id = result.inserted_primary_key[0] - except dbexc.DBDuplicateEntry: - # retry function to pick up duplicate committed object - meter_id = Connection._create_meter(conn, name, type, unit) - - return meter_id - - @staticmethod - def _create_resource(conn, res_id, user_id, project_id, source_id, - rmeta): - # TODO(gordc): implement lru_cache to improve performance - try: - res = models.Resource.__table__ - m_hash = jsonutils.dumps(rmeta, sort_keys=True) - if six.PY3: - m_hash = m_hash.encode('utf-8') - m_hash = hashlib.md5(m_hash).hexdigest() - trans = conn.begin_nested() - if conn.dialect.name == 'sqlite': - trans = conn.begin() - with trans: - res_row = conn.execute( - sa.select([res.c.internal_id]) - .where(sa.and_(res.c.resource_id == res_id, - res.c.user_id == user_id, - res.c.project_id == project_id, - res.c.source_id == source_id, - res.c.metadata_hash == m_hash))).first() - internal_id = res_row[0] if res_row else None - if internal_id is None: - result = conn.execute(res.insert(), resource_id=res_id, - user_id=user_id, - project_id=project_id, - 
source_id=source_id, - resource_metadata=rmeta, - metadata_hash=m_hash) - internal_id = result.inserted_primary_key[0] - if rmeta and isinstance(rmeta, dict): - meta_map = {} - for key, v in utils.dict_to_keyval(rmeta): - try: - _model = sql_utils.META_TYPE_MAP[type(v)] - if meta_map.get(_model) is None: - meta_map[_model] = [] - meta_map[_model].append( - {'id': internal_id, 'meta_key': key, - 'value': v}) - except KeyError: - LOG.warning(_("Unknown metadata type. Key " - "(%s) will not be queryable."), - key) - for _model in meta_map.keys(): - conn.execute(_model.__table__.insert(), - meta_map[_model]) - - except dbexc.DBDuplicateEntry: - # retry function to pick up duplicate committed object - internal_id = Connection._create_resource( - conn, res_id, user_id, project_id, source_id, rmeta) - - return internal_id - - @api.wrap_db_retry(retry_interval=cfg.CONF.database.retry_interval, - max_retries=cfg.CONF.database.max_retries, - retry_on_deadlock=True) - def record_metering_data(self, data): - """Write the data to the backend storage system. - - :param data: a dictionary such as returned by - ceilometer.publisher.utils.meter_message_from_counter - """ - engine = self._engine_facade.get_engine() - with engine.begin() as conn: - # Record the raw data for the sample. - m_id = self._create_meter(conn, - data['counter_name'], - data['counter_type'], - data['counter_unit']) - res_id = self._create_resource(conn, - data['resource_id'], - data['user_id'], - data['project_id'], - data['source'], - data['resource_metadata']) - sample = models.Sample.__table__ - conn.execute(sample.insert(), meter_id=m_id, - resource_id=res_id, - timestamp=data['timestamp'], - volume=data['counter_volume'], - message_signature=data['message_signature'], - message_id=data['message_id']) - - def clear_expired_metering_data(self, ttl): - """Clear expired data from the backend storage system. - - Clearing occurs according to the time-to-live. - :param ttl: Number of seconds to keep records for. 
- """ - # Prevent database deadlocks from occurring by - # using separate transaction for each delete - session = self._engine_facade.get_session() - with session.begin(): - end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) - sample_q = (session.query(models.Sample) - .filter(models.Sample.timestamp < end)) - rows = sample_q.delete() - LOG.info(_LI("%d samples removed from database"), rows) - - if not cfg.CONF.sql_expire_samples_only: - with session.begin(): - # remove Meter definitions with no matching samples - (session.query(models.Meter) - .filter(~models.Meter.samples.any()) - .delete(synchronize_session=False)) - - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(~models.Resource.samples.any())) - # mark resource with no matching samples for delete - resource_q.update({models.Resource.metadata_hash: "delete_" - + cast(models.Resource.internal_id, - sa.String)}, - synchronize_session=False) - - # remove metadata of resources marked for delete - for table in [models.MetaText, models.MetaBigInt, - models.MetaFloat, models.MetaBool]: - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(models.Resource.metadata_hash - .like('delete_%'))) - resource_subq = resource_q.subquery() - (session.query(table) - .filter(table.id.in_(resource_subq)) - .delete(synchronize_session=False)) - - # remove resource marked for delete - with session.begin(): - resource_q = (session.query(models.Resource.internal_id) - .filter(models.Resource.metadata_hash - .like('delete_%'))) - resource_q.delete(synchronize_session=False) - LOG.info(_LI("Expired residual resource and" - " meter definition data")) - - def get_resources(self, user=None, project=None, source=None, - start_timestamp=None, start_timestamp_op=None, - end_timestamp=None, end_timestamp_op=None, - metaquery=None, resource=None, limit=None): - """Return an iterable of api_models.Resource instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param source: Optional source filter. - :param start_timestamp: Optional modified timestamp start range. - :param start_timestamp_op: Optional start time operator, like gt, ge. - :param end_timestamp: Optional modified timestamp end range. - :param end_timestamp_op: Optional end time operator, like lt, le. - :param metaquery: Optional dict with metadata to match on. - :param resource: Optional resource filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return - s_filter = storage.SampleFilter(user=user, - project=project, - source=source, - start_timestamp=start_timestamp, - start_timestamp_op=start_timestamp_op, - end_timestamp=end_timestamp, - end_timestamp_op=end_timestamp_op, - metaquery=metaquery, - resource=resource) - - session = self._engine_facade.get_session() - # get list of resource_ids - has_timestamp = start_timestamp or end_timestamp - # NOTE: When sql_expire_samples_only is enabled, there will be some - # resources without any sample, in such case we should use inner - # join on sample table to avoid wrong result. 
- if cfg.CONF.sql_expire_samples_only or has_timestamp: - res_q = session.query(distinct(models.Resource.resource_id)).join( - models.Sample, - models.Sample.resource_id == models.Resource.internal_id) - else: - res_q = session.query(distinct(models.Resource.resource_id)) - res_q = make_query_from_filter(session, res_q, s_filter, - require_meter=False) - res_q = res_q.limit(limit) if limit else res_q - for res_id in res_q.all(): - - # get max and min sample timestamp value - min_max_q = (session.query(func.max(models.Sample.timestamp) - .label('max_timestamp'), - func.min(models.Sample.timestamp) - .label('min_timestamp')) - .join(models.Resource, - models.Resource.internal_id == - models.Sample.resource_id) - .filter(models.Resource.resource_id == - res_id[0])) - - min_max_q = make_query_from_filter(session, min_max_q, s_filter, - require_meter=False) - - min_max = min_max_q.first() - - # get resource details for latest sample - res_q = (session.query(models.Resource.resource_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.source_id, - models.Resource.resource_metadata) - .join(models.Sample, - models.Sample.resource_id == - models.Resource.internal_id) - .filter(models.Sample.timestamp == - min_max.max_timestamp) - .filter(models.Resource.resource_id == - res_id[0]) - .order_by(models.Sample.id.desc()).limit(1)) - - res = res_q.first() - - yield api_models.Resource( - resource_id=res.resource_id, - project_id=res.project_id, - first_sample_timestamp=min_max.min_timestamp, - last_sample_timestamp=min_max.max_timestamp, - source=res.source_id, - user_id=res.user_id, - metadata=res.resource_metadata - ) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of api_models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional ID of the resource. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - s_filter = storage.SampleFilter(user=user, - project=project, - source=source, - metaquery=metaquery, - resource=resource) - - # NOTE(gordc): get latest sample of each meter/resource. we do not - # filter here as we want to filter only on latest record. - session = self._engine_facade.get_session() - - subq = session.query(func.max(models.Sample.id).label('id')).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - - if unique: - subq = subq.group_by(models.Sample.meter_id) - else: - subq = subq.group_by(models.Sample.meter_id, - models.Resource.resource_id) - - if resource: - subq = subq.filter(models.Resource.resource_id == resource) - subq = subq.subquery() - - # get meter details for samples. 
- query_sample = (session.query(models.Sample.meter_id, - models.Meter.name, models.Meter.type, - models.Meter.unit, - models.Resource.resource_id, - models.Resource.project_id, - models.Resource.source_id, - models.Resource.user_id).join( - subq, subq.c.id == models.Sample.id) - .join(models.Meter, models.Meter.id == models.Sample.meter_id) - .join(models.Resource, - models.Resource.internal_id == models.Sample.resource_id)) - query_sample = make_query_from_filter(session, query_sample, s_filter, - require_meter=False) - - query_sample = query_sample.limit(limit) if limit else query_sample - - if unique: - for row in query_sample.all(): - yield api_models.Meter( - name=row.name, - type=row.type, - unit=row.unit, - resource_id=None, - project_id=None, - source=None, - user_id=None) - else: - for row in query_sample.all(): - yield api_models.Meter( - name=row.name, - type=row.type, - unit=row.unit, - resource_id=row.resource_id, - project_id=row.project_id, - source=row.source_id, - user_id=row.user_id) - - @staticmethod - def _retrieve_samples(query): - samples = query.all() - - for s in samples: - # Remove the id generated by the database when - # the sample was inserted. It is an implementation - # detail that should not leak outside of the driver. - yield api_models.Sample( - source=s.source_id, - counter_name=s.counter_name, - counter_type=s.counter_type, - counter_unit=s.counter_unit, - counter_volume=s.counter_volume, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - recorded_at=s.recorded_at, - resource_metadata=s.resource_metadata, - message_id=s.message_id, - message_signature=s.message_signature, - ) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of api_models.Samples. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. 
- """ - if limit == 0: - return [] - - session = self._engine_facade.get_session() - query = session.query(models.Sample.timestamp, - models.Sample.recorded_at, - models.Sample.message_id, - models.Sample.message_signature, - models.Sample.volume.label('counter_volume'), - models.Meter.name.label('counter_name'), - models.Meter.type.label('counter_type'), - models.Meter.unit.label('counter_unit'), - models.Resource.source_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.resource_metadata, - models.Resource.resource_id).join( - models.Meter, models.Meter.id == models.Sample.meter_id).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id).order_by( - models.Sample.timestamp.desc()) - query = make_query_from_filter(session, query, sample_filter, - require_meter=False) - if limit: - query = query.limit(limit) - return self._retrieve_samples(query) - - def query_samples(self, filter_expr=None, orderby=None, limit=None): - if limit == 0: - return [] - - session = self._engine_facade.get_session() - engine = self._engine_facade.get_engine() - query = session.query(models.Sample.timestamp, - models.Sample.recorded_at, - models.Sample.message_id, - models.Sample.message_signature, - models.Sample.volume.label('counter_volume'), - models.Meter.name.label('counter_name'), - models.Meter.type.label('counter_type'), - models.Meter.unit.label('counter_unit'), - models.Resource.source_id, - models.Resource.user_id, - models.Resource.project_id, - models.Resource.resource_metadata, - models.Resource.resource_id).join( - models.Meter, models.Meter.id == models.Sample.meter_id).join( - models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - transformer = sql_utils.QueryTransformer(models.FullSample, query, - dialect=engine.dialect.name) - if filter_expr is not None: - transformer.apply_filter(filter_expr) - - transformer.apply_options(orderby, limit) - return self._retrieve_samples(transformer.get_query()) - - @staticmethod - def _get_aggregate_functions(aggregate): - if not aggregate: - return [f for f in STANDARD_AGGREGATES.values()] - - functions = [] - - for a in aggregate: - if a.func in STANDARD_AGGREGATES: - functions.append(STANDARD_AGGREGATES[a.func]) - elif a.func in UNPARAMETERIZED_AGGREGATES: - functions.append(UNPARAMETERIZED_AGGREGATES[a.func]) - elif a.func in PARAMETERIZED_AGGREGATES['compute']: - validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func) - if not (validate and validate(a.param)): - raise storage.StorageBadAggregate('Bad aggregate: %s.%s' - % (a.func, a.param)) - compute = PARAMETERIZED_AGGREGATES['compute'][a.func] - functions.append(compute(a.param)) - else: - # NOTE(zqfan): We already have checked at API level, but - # still leave it here in case of directly storage calls. 
- msg = _('Invalid aggregation function: %s') % a.func - raise storage.StorageBadAggregate(msg) - - return functions - - def _make_stats_query(self, sample_filter, groupby, aggregate): - - select = [ - func.min(models.Sample.timestamp).label('tsmin'), - func.max(models.Sample.timestamp).label('tsmax'), - models.Meter.unit - ] - select.extend(self._get_aggregate_functions(aggregate)) - - session = self._engine_facade.get_session() - - if groupby: - group_attributes = [] - for g in groupby: - if g != 'resource_metadata.instance_type': - group_attributes.append(getattr(models.Resource, g)) - else: - group_attributes.append( - getattr(models.MetaText, 'value') - .label('resource_metadata.instance_type')) - - select.extend(group_attributes) - - query = ( - session.query(*select) - .join(models.Meter, - models.Meter.id == models.Sample.meter_id) - .join(models.Resource, - models.Resource.internal_id == models.Sample.resource_id) - .group_by(models.Meter.unit)) - - if groupby: - for g in groupby: - if g == 'resource_metadata.instance_type': - query = query.join( - models.MetaText, - models.Resource.internal_id == models.MetaText.id) - query = query.filter( - models.MetaText.meta_key == 'instance_type') - query = query.group_by(*group_attributes) - - return make_query_from_filter(session, query, sample_filter) - - @staticmethod - def _stats_result_aggregates(result, aggregate): - stats_args = {} - if isinstance(result.count, six.integer_types): - stats_args['count'] = result.count - for attr in ['min', 'max', 'sum', 'avg']: - if hasattr(result, attr): - stats_args[attr] = getattr(result, attr) - if aggregate: - stats_args['aggregate'] = {} - for a in aggregate: - key = '%s%s' % (a.func, '/%s' % a.param if a.param else '') - stats_args['aggregate'][key] = getattr(result, key) - return stats_args - - @staticmethod - def _stats_result_to_model(result, period, period_start, - period_end, groupby, aggregate): - stats_args = Connection._stats_result_aggregates(result, aggregate) - stats_args['unit'] = result.unit - duration = (timeutils.delta_seconds(result.tsmin, result.tsmax) - if result.tsmin is not None and result.tsmax is not None - else None) - stats_args['duration'] = duration - stats_args['duration_start'] = result.tsmin - stats_args['duration_end'] = result.tsmax - stats_args['period'] = period - stats_args['period_start'] = period_start - stats_args['period_end'] = period_end - stats_args['groupby'] = (dict( - (g, getattr(result, g)) for g in groupby) if groupby else None) - return api_models.Statistics(**stats_args) - - def get_meter_statistics(self, sample_filter, period=None, groupby=None, - aggregate=None): - """Return an iterable of api_models.Statistics instances. - - Items are containing meter statistics described by the query - parameters. The filter must have a meter value set. - """ - if groupby: - for group in groupby: - if group not in ['user_id', 'project_id', 'resource_id', - 'resource_metadata.instance_type']: - raise ceilometer.NotImplementedError('Unable to group by ' - 'these fields') - - if not period: - for res in self._make_stats_query(sample_filter, - groupby, - aggregate): - if res.count: - yield self._stats_result_to_model(res, 0, - res.tsmin, res.tsmax, - groupby, - aggregate) - return - - if not (sample_filter.start_timestamp and sample_filter.end_timestamp): - res = self._make_stats_query(sample_filter, - None, - aggregate).first() - if not res: - # NOTE(liusheng):The 'res' may be NoneType, because no - # sample has found with sample filter(s). 
- return - - query = self._make_stats_query(sample_filter, groupby, aggregate) - # HACK(jd) This is an awful method to compute stats by period, but - # since we're trying to be SQL agnostic we have to write portable - # code, so here it is, admire! We're going to do one request to get - # stats by period. We would like to use GROUP BY, but there's no - # portable way to manipulate timestamp in SQL, so we can't. - for period_start, period_end in base.iter_period( - sample_filter.start_timestamp or res.tsmin, - sample_filter.end_timestamp or res.tsmax, - period): - q = query.filter(models.Sample.timestamp >= period_start) - q = q.filter(models.Sample.timestamp < period_end) - for r in q.all(): - if r.count: - yield self._stats_result_to_model( - result=r, - period=int(timeutils.delta_seconds(period_start, - period_end)), - period_start=period_start, - period_end=period_end, - groupby=groupby, - aggregate=aggregate - ) diff --git a/ceilometer/storage/models.py b/ceilometer/storage/models.py deleted file mode 100644 index 816a4c5d..00000000 --- a/ceilometer/storage/models.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Model classes for use in the storage API. -""" -from ceilometer.storage import base - - -class Resource(base.Model): - """Something for which sample data has been collected.""" - - def __init__(self, resource_id, project_id, - first_sample_timestamp, - last_sample_timestamp, - source, user_id, metadata): - """Create a new resource. - - :param resource_id: UUID of the resource - :param project_id: UUID of project owning the resource - :param first_sample_timestamp: first sample timestamp captured - :param last_sample_timestamp: last sample timestamp captured - :param source: the identifier for the user/project id definition - :param user_id: UUID of user owning the resource - :param metadata: most current metadata for the resource (a dict) - """ - base.Model.__init__(self, - resource_id=resource_id, - first_sample_timestamp=first_sample_timestamp, - last_sample_timestamp=last_sample_timestamp, - project_id=project_id, - source=source, - user_id=user_id, - metadata=metadata, - ) - - -class Meter(base.Model): - """Definition of a meter for which sample data has been collected.""" - - def __init__(self, name, type, unit, resource_id, project_id, source, - user_id): - """Create a new meter. 
- - :param name: name of the meter - :param type: type of the meter (gauge, delta, cumulative) - :param unit: unit of the meter - :param resource_id: UUID of the resource - :param project_id: UUID of project owning the resource - :param source: the identifier for the user/project id definition - :param user_id: UUID of user owning the resource - """ - base.Model.__init__(self, - name=name, - type=type, - unit=unit, - resource_id=resource_id, - project_id=project_id, - source=source, - user_id=user_id, - ) - - -class Sample(base.Model): - """One collected data point.""" - def __init__(self, - source, - counter_name, counter_type, counter_unit, counter_volume, - user_id, project_id, resource_id, - timestamp, resource_metadata, - message_id, - message_signature, - recorded_at, - ): - """Create a new sample. - - :param source: the identifier for the user/project id definition - :param counter_name: the name of the measurement being taken - :param counter_type: the type of the measurement - :param counter_unit: the units for the measurement - :param counter_volume: the measured value - :param user_id: the user that triggered the measurement - :param project_id: the project that owns the resource - :param resource_id: the thing on which the measurement was taken - :param timestamp: the time of the measurement - :param resource_metadata: extra details about the resource - :param message_id: a message identifier - :param recorded_at: sample record timestamp - :param message_signature: a hash created from the rest of the - message data - """ - base.Model.__init__(self, - source=source, - counter_name=counter_name, - counter_type=counter_type, - counter_unit=counter_unit, - counter_volume=counter_volume, - user_id=user_id, - project_id=project_id, - resource_id=resource_id, - timestamp=timestamp, - resource_metadata=resource_metadata, - message_id=message_id, - message_signature=message_signature, - recorded_at=recorded_at) - - -class Statistics(base.Model): - """Computed statistics based on a set of sample data.""" - def __init__(self, unit, - period, period_start, period_end, - duration, duration_start, duration_end, - groupby, **data): - """Create a new statistics object. - - :param unit: The unit type of the data set - :param period: The length of the time range covered by these stats - :param period_start: The timestamp for the start of the period - :param period_end: The timestamp for the end of the period - :param duration: The total time for the matching samples - :param duration_start: The earliest time for the matching samples - :param duration_end: The latest time for the matching samples - :param groupby: The fields used to group the samples. 
- :param data: some or all of the following aggregates - min: The smallest volume found - max: The largest volume found - avg: The average of all volumes found - sum: The total of all volumes found - count: The number of samples found - aggregate: name-value pairs for selectable aggregates - """ - base.Model.__init__(self, unit=unit, - period=period, period_start=period_start, - period_end=period_end, duration=duration, - duration_start=duration_start, - duration_end=duration_end, - groupby=groupby, - **data) diff --git a/ceilometer/storage/mongo/utils.py b/ceilometer/storage/mongo/utils.py index 3c1195a2..2b41872e 100644 --- a/ceilometer/storage/mongo/utils.py +++ b/ceilometer/storage/mongo/utils.py @@ -18,7 +18,6 @@ """Common functions for MongoDB backend """ -import datetime import time import weakref @@ -28,7 +27,6 @@ from oslo_utils import netutils import pymongo import pymongo.errors import six -from six.moves.urllib import parse from ceilometer.i18n import _, _LI @@ -43,13 +41,6 @@ OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'} MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] -FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result) -FINALIZE_INT_LAMBDA = lambda result, param=None: int(result) -CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id', - 'user_id', - 'project_id', - 'source']) - def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): @@ -125,118 +116,6 @@ def make_events_query_from_filter(event_filter): return query -def make_query_from_filter(sample_filter, require_meter=True): - """Return a query dictionary based on the settings in the filter. - - :param sample_filter: SampleFilter instance - :param require_meter: If true and the filter does not have a meter, - raise an error. - """ - q = {} - - if sample_filter.user: - q['user_id'] = sample_filter.user - if sample_filter.project: - q['project_id'] = sample_filter.project - - if sample_filter.meter: - q['counter_name'] = sample_filter.meter - elif require_meter: - raise RuntimeError('Missing required meter specifier') - - ts_range = make_timestamp_range(sample_filter.start_timestamp, - sample_filter.end_timestamp, - sample_filter.start_timestamp_op, - sample_filter.end_timestamp_op) - - if ts_range: - q['timestamp'] = ts_range - - if sample_filter.resource: - q['resource_id'] = sample_filter.resource - if sample_filter.source: - q['source'] = sample_filter.source - if sample_filter.message_id: - q['message_id'] = sample_filter.message_id - - # so the samples call metadata resource_metadata, so we convert - # to that. - q.update(dict( - ('resource_%s' % k, v) for (k, v) in six.iteritems( - improve_keys(sample_filter.metaquery, metaquery=True)))) - return q - - -def quote_key(key, reverse=False): - """Prepare key for storage data in MongoDB. - - :param key: key that should be quoted - :param reverse: boolean, True --- if we need a reverse order of the keys - parts - :return: iter of quoted part of the key - """ - r = -1 if reverse else 1 - - for k in key.split('.')[::r]: - if k.startswith('$'): - k = parse.quote(k) - yield k - - -def improve_keys(data, metaquery=False): - """Improves keys in dict if they contained '.' or started with '$'. 
- - :param data: is a dictionary where keys need to be checked and improved - :param metaquery: boolean, if True dots are not escaped from the keys - :return: improved dictionary if keys contained dots or started with '$': - {'a.b': 'v'} -> {'a': {'b': 'v'}} - {'$ab': 'v'} -> {'%24ab': 'v'} - """ - if not isinstance(data, dict): - return data - - if metaquery: - for key in six.iterkeys(data): - if '.$' in key: - key_list = [] - for k in quote_key(key): - key_list.append(k) - new_key = '.'.join(key_list) - data[new_key] = data.pop(key) - else: - for key, value in data.items(): - if isinstance(value, dict): - improve_keys(value) - if '.' in key: - new_dict = {} - for k in quote_key(key, reverse=True): - new = {} - new[k] = new_dict if new_dict else data.pop(key) - new_dict = new - data.update(new_dict) - else: - if key.startswith('$'): - new_key = parse.quote(key) - data[new_key] = data.pop(key) - return data - - -def unquote_keys(data): - """Restores initial view of 'quoted' keys in dictionary data - - :param data: is a dictionary - :return: data with restored keys if they were 'quoted'. - """ - if isinstance(data, dict): - for key, value in data.items(): - if isinstance(value, dict): - unquote_keys(value) - if key.startswith('%24'): - k = parse.unquote(key) - data[k] = data.pop(key) - return data - - class ConnectionPool(object): def __init__(self): @@ -272,130 +151,6 @@ class ConnectionPool(object): raise -class QueryTransformer(object): - - operators = {"<": "$lt", - ">": "$gt", - "<=": "$lte", - "=<": "$lte", - ">=": "$gte", - "=>": "$gte", - "!=": "$ne", - "in": "$in", - "=~": "$regex"} - - complex_operators = {"or": "$or", - "and": "$and"} - - ordering_functions = {"asc": pymongo.ASCENDING, - "desc": pymongo.DESCENDING} - - def transform_orderby(self, orderby): - orderby_filter = [] - - for field in orderby: - field_name = list(field.keys())[0] - ordering = self.ordering_functions[list(field.values())[0]] - orderby_filter.append((field_name, ordering)) - return orderby_filter - - @staticmethod - def _move_negation_to_leaf(condition): - """Moves every not operator to the leafs. - - Moving is going by applying the De Morgan rules and annihilating - double negations. 
- """ - def _apply_de_morgan(tree, negated_subtree, negated_op): - if negated_op == "and": - new_op = "or" - else: - new_op = "and" - - tree[new_op] = [{"not": child} - for child in negated_subtree[negated_op]] - del tree["not"] - - def transform(subtree): - op = list(subtree.keys())[0] - if op in ["and", "or"]: - [transform(child) for child in subtree[op]] - elif op == "not": - negated_tree = subtree[op] - negated_op = list(negated_tree.keys())[0] - if negated_op == "and": - _apply_de_morgan(subtree, negated_tree, negated_op) - transform(subtree) - elif negated_op == "or": - _apply_de_morgan(subtree, negated_tree, negated_op) - transform(subtree) - elif negated_op == "not": - # two consecutive not annihilates themselves - value = list(negated_tree.values())[0] - new_op = list(value.keys())[0] - subtree[new_op] = negated_tree[negated_op][new_op] - del subtree["not"] - transform(subtree) - - transform(condition) - - def transform_filter(self, condition): - # in Mongo not operator can only be applied to - # simple expressions so we have to move every - # not operator to the leafs of the expression tree - self._move_negation_to_leaf(condition) - return self._process_json_tree(condition) - - def _handle_complex_op(self, complex_op, nodes): - element_list = [] - for node in nodes: - element = self._process_json_tree(node) - element_list.append(element) - complex_operator = self.complex_operators[complex_op] - op = {complex_operator: element_list} - return op - - def _handle_not_op(self, negated_tree): - # assumes that not is moved to the leaf already - # so we are next to a leaf - negated_op = list(negated_tree.keys())[0] - negated_field = list(negated_tree[negated_op].keys())[0] - value = negated_tree[negated_op][negated_field] - if negated_op == "=": - return {negated_field: {"$ne": value}} - elif negated_op == "!=": - return {negated_field: value} - else: - return {negated_field: {"$not": - {self.operators[negated_op]: value}}} - - def _handle_simple_op(self, simple_op, nodes): - field_name = list(nodes.keys())[0] - field_value = list(nodes.values())[0] - - # no operator for equal in Mongo - if simple_op == "=": - op = {field_name: field_value} - return op - - operator = self.operators[simple_op] - op = {field_name: {operator: field_value}} - return op - - def _process_json_tree(self, condition_tree): - operator_node = list(condition_tree.keys())[0] - nodes = list(condition_tree.values())[0] - - if operator_node in self.complex_operators: - return self._handle_complex_op(operator_node, nodes) - - if operator_node == "not": - negated_tree = condition_tree[operator_node] - return self._handle_not_op(negated_tree) - - return self._handle_simple_op(operator_node, nodes) - - def safe_mongo_call(call): def closure(*args, **kwargs): # NOTE(idegtiarov) options max_retries and retry_interval have been @@ -507,135 +262,3 @@ class CursorProxy(pymongo.cursor.Cursor): def __getattr__(self, item): return getattr(self.cursor, item) - - -class AggregationFields(object): - def __init__(self, version, - group, - project, - finalize=None, - parametrized=False, - validate=None): - self._finalize = finalize or FINALIZE_FLOAT_LAMBDA - self.group = lambda *args: group(*args) if parametrized else group - self.project = (lambda *args: project(*args) - if parametrized else project) - self.version = version - self.validate = validate or (lambda name, param: True) - - def finalize(self, name, data, param=None): - field = ("%s" % name) + ("/%s" % param if param else "") - return {field: (self._finalize(data.get(field)) 
- if self._finalize else data.get(field))} - - -class Aggregation(object): - def __init__(self, name, aggregation_fields): - self.name = name - aggregation_fields = (aggregation_fields - if isinstance(aggregation_fields, list) - else [aggregation_fields]) - self.aggregation_fields = sorted(aggregation_fields, - key=lambda af: getattr(af, "version"), - reverse=True) - - def _get_compatible_aggregation_field(self, version_array): - if version_array: - version_array = version_array[0:2] - else: - version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION - for aggregation_field in self.aggregation_fields: - if version_array >= aggregation_field.version: - return aggregation_field - - def group(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.group(param) - - def project(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.project(param) - - def finalize(self, data, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.finalize(self.name, data, param) - - def validate(self, param=None, version_array=None): - af = self._get_compatible_aggregation_field(version_array) - return af.validate(self.name, param) - -SUM_AGGREGATION = Aggregation( - "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"sum": {"$sum": "$counter_volume"}}, - {"sum": "$sum"}, - )) -AVG_AGGREGATION = Aggregation( - "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"avg": {"$avg": "$counter_volume"}}, - {"avg": "$avg"}, - )) -MIN_AGGREGATION = Aggregation( - "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"min": {"$min": "$counter_volume"}}, - {"min": "$min"}, - )) -MAX_AGGREGATION = Aggregation( - "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"max": {"$max": "$counter_volume"}}, - {"max": "$max"}, - )) -COUNT_AGGREGATION = Aggregation( - "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"count": {"$sum": 1}}, - {"count": "$count"}, - FINALIZE_INT_LAMBDA)) -STDDEV_AGGREGATION = Aggregation( - "stddev", - AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - {"std_square": { - "$sum": { - "$multiply": ["$counter_volume", - "$counter_volume"] - }}, - "std_count": {"$sum": 1}, - "std_sum": {"$sum": "$counter_volume"}}, - {"stddev": { - "count": "$std_count", - "sum": "$std_sum", - "square_sum": "$std_square"}}, - lambda stddev: ((stddev['square_sum'] - * stddev['count'] - - stddev["sum"] ** 2) ** 0.5 - / stddev['count']))) - -CARDINALITY_AGGREGATION = Aggregation( - "cardinality", - # $cond operator available only in MongoDB 2.6+ - [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION, - lambda field: ({"cardinality/%s" % field: - {"$addToSet": "$%s" % field}}), - lambda field: { - "cardinality/%s" % field: { - "$cond": [ - {"$eq": ["$cardinality/%s" % field, None]}, - 0, - {"$size": "$cardinality/%s" % field}] - }}, - validate=CARDINALITY_VALIDATION, - parametrized=True), - AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION, - lambda field: ({"cardinality/%s" % field: - {"$addToSet": "$%s" % field}}), - lambda field: ({"cardinality/%s" % field: - "$cardinality/%s" % field}), - finalize=len, - validate=CARDINALITY_VALIDATION, - parametrized=True)] -) - - -def from_unix_timestamp(timestamp): - if (isinstance(timestamp, six.integer_types) or - isinstance(timestamp, float)): - return datetime.datetime.fromtimestamp(timestamp) - return timestamp diff --git 
a/ceilometer/storage/pymongo_base.py b/ceilometer/storage/pymongo_base.py deleted file mode 100644 index bbb49ac5..00000000 --- a/ceilometer/storage/pymongo_base.py +++ /dev/null @@ -1,178 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Common functions for MongoDB backend.""" -import pymongo - -from ceilometer.storage import base -from ceilometer.storage import models -from ceilometer.storage.mongo import utils as pymongo_utils -from ceilometer import utils - - -COMMON_AVAILABLE_CAPABILITIES = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, -} - - -AVAILABLE_STORAGE_CAPABILITIES = { - 'storage': {'production_ready': True}, -} - - -class Connection(base.Connection): - """Base Connection class for MongoDB driver.""" - CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, - COMMON_AVAILABLE_CAPABILITIES) - - STORAGE_CAPABILITIES = utils.update_nested( - base.Connection.STORAGE_CAPABILITIES, - AVAILABLE_STORAGE_CAPABILITIES, - ) - - def get_meters(self, user=None, project=None, resource=None, source=None, - metaquery=None, limit=None, unique=False): - """Return an iterable of models.Meter instances - - :param user: Optional ID for user that owns the resource. - :param project: Optional ID for project that owns the resource. - :param resource: Optional resource filter. - :param source: Optional source filter. - :param metaquery: Optional dict with metadata to match on. - :param limit: Maximum number of results to return. - :param unique: If set to true, return only unique meter information. - """ - if limit == 0: - return - - metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {} - - q = {} - if user == 'None': - q['user_id'] = None - elif user is not None: - q['user_id'] = user - if project == 'None': - q['project_id'] = None - elif project is not None: - q['project_id'] = project - if resource == 'None': - q['_id'] = None - elif resource is not None: - q['_id'] = resource - if source is not None: - q['source'] = source - q.update(metaquery) - - count = 0 - if unique: - meter_names = set() - - for r in self.db.resource.find(q): - for r_meter in r['meter']: - if unique: - if r_meter['counter_name'] in meter_names: - continue - else: - meter_names.add(r_meter['counter_name']) - - if limit and count >= limit: - return - else: - count += 1 - - if unique: - yield models.Meter( - name=r_meter['counter_name'], - type=r_meter['counter_type'], - # Return empty string if 'counter_unit' is not valid - # for backward compatibility. - unit=r_meter.get('counter_unit', ''), - resource_id=None, - project_id=None, - source=None, - user_id=None) - else: - yield models.Meter( - name=r_meter['counter_name'], - type=r_meter['counter_type'], - # Return empty string if 'counter_unit' is not valid - # for backward compatibility. 
- unit=r_meter.get('counter_unit', ''), - resource_id=r['_id'], - project_id=r['project_id'], - source=r['source'], - user_id=r['user_id']) - - def get_samples(self, sample_filter, limit=None): - """Return an iterable of model.Sample instances. - - :param sample_filter: Filter. - :param limit: Maximum number of results to return. - """ - if limit == 0: - return [] - q = pymongo_utils.make_query_from_filter(sample_filter, - require_meter=False) - - return self._retrieve_samples(q, - [("timestamp", pymongo.DESCENDING)], - limit) - - def query_samples(self, filter_expr=None, orderby=None, limit=None): - if limit == 0: - return [] - query_filter = {} - orderby_filter = [("timestamp", pymongo.DESCENDING)] - transformer = pymongo_utils.QueryTransformer() - if orderby is not None: - orderby_filter = transformer.transform_orderby(orderby) - if filter_expr is not None: - query_filter = transformer.transform_filter(filter_expr) - - return self._retrieve_samples(query_filter, orderby_filter, limit) - - def _retrieve_samples(self, query, orderby, limit): - if limit is not None: - samples = self.db.meter.find(query, - limit=limit, - sort=orderby) - else: - samples = self.db.meter.find(query, - sort=orderby) - - for s in samples: - # Remove the ObjectId generated by the database when - # the sample was inserted. It is an implementation - # detail that should not leak outside of the driver. - del s['_id'] - # Backward compatibility for samples without units - s['counter_unit'] = s.get('counter_unit', '') - # Compatibility with MongoDB 3.+ - s['counter_volume'] = float(s.get('counter_volume')) - # Tolerate absence of recorded_at in older datapoints - s['recorded_at'] = s.get('recorded_at') - # Check samples for metadata and "unquote" key if initially it - # was started with '$'. - if s.get('resource_metadata'): - s['resource_metadata'] = pymongo_utils.unquote_keys( - s.get('resource_metadata')) - yield models.Sample(**s) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/README b/ceilometer/storage/sqlalchemy/migrate_repo/README deleted file mode 100644 index 42bddd18..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/README +++ /dev/null @@ -1,4 +0,0 @@ -sqlalchemy-migrate is DEPRECATED. - -All new migrations should be written using alembic. -Please see ceilometer/storage/sqlalchemy/alembic/README diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py b/ceilometer/storage/sqlalchemy/migrate_repo/manage.py deleted file mode 100644 index 39fa3892..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env python -from migrate.versioning.shell import main - -if __name__ == '__main__': - main(debug='False') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg b/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg deleted file mode 100644 index cd16764f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id=ceilometer - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. 
-# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table=migrate_version - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs=[] - -# When creating new change scripts, Migrate will stamp the new script with -# a version number. By default this is latest_version + 1. You can set this -# to 'true' to tell Migrate to use the UTC timestamp instead. -use_timestamp_numbering=False diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py deleted file mode 100644 index 1032cb40..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import DateTime -from sqlalchemy import Index -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import UniqueConstraint - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - - meter = Table( - 'meter', meta, - Column('id', Integer, primary_key=True, index=True), - Column('counter_name', String(255)), - Column('user_id', String(255), index=True), - Column('project_id', String(255), index=True), - Column('resource_id', String(255)), - Column('resource_metadata', String(5000)), - Column('counter_type', String(255)), - Column('counter_volume', Integer), - Column('counter_duration', Integer), - Column('timestamp', DateTime(timezone=False), index=True), - Column('message_signature', String(1000)), - Column('message_id', String(1000)), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - resource = Table( - 'resource', meta, - Column('id', String(255), primary_key=True, index=True), - Column('resource_metadata', String(5000)), - Column('project_id', String(255), index=True), - Column('received_timestamp', DateTime(timezone=False)), - Column('timestamp', DateTime(timezone=False), index=True), - Column('user_id', String(255), index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - user = Table( - 'user', meta, - Column('id', String(255), primary_key=True, index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - project = Table( - 'project', meta, - Column('id', String(255), primary_key=True, index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - sourceassoc = Table( - 'sourceassoc', meta, - Column('source_id', String(255), index=True), - Column('user_id', String(255)), - Column('project_id', String(255)), - Column('resource_id', String(255)), - Column('meter_id', Integer), - Index('idx_su', 'source_id', 'user_id'), - Index('idx_sp', 'source_id', 'project_id'), - Index('idx_sr', 'source_id', 'resource_id'), - Index('idx_sm', 'source_id', 'meter_id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - source = Table( - 'source', meta, - Column('id', String(255), primary_key=True, index=True), - UniqueConstraint('id'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - - tables = [meter, project, resource, user, source, sourceassoc] - for i in sorted(tables, key=lambda table: table.fullname): - i.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py deleted file mode 100644 index 667654ef..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - duration = Column('counter_duration', Integer) - meter.drop_column(duration) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py deleted file mode 100644 index fecd65c5..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2012 Canonical. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - - if migrate_engine.name == "mysql": - tables = ['meter', 'user', 'resource', 'project', 'source', - 'sourceassoc'] - migrate_engine.execute("SET foreign_key_checks = 0") - - for table in tables: - migrate_engine.execute( - "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table) - migrate_engine.execute("SET foreign_key_checks = 1") - migrate_engine.execute( - "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % - migrate_engine.url.database) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py deleted file mode 100644 index ac4b1cb6..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - unit = Column('counter_unit', String(255)) - meter.create_column(unit) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py deleted file mode 100644 index d85c7d73..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from sqlalchemy import MetaData, Table, Column, DateTime - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - timestamp = Column('timestamp', DateTime) - resource.drop_column(timestamp) - received_timestamp = Column('received_timestamp', DateTime) - resource.drop_column(received_timestamp) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py deleted file mode 100644 index 36a44846..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2013 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Float -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - meter.c.counter_volume.alter(type=Float(53)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py deleted file mode 100644 index 55f7f820..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py +++ /dev/null @@ -1,46 +0,0 @@ -# -# Copyright 2013 eNovance -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import MetaData, Table, Column, Text -from sqlalchemy import Boolean, Integer, String, DateTime, Float - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table( - 'alarm', meta, - Column('id', String(255), primary_key=True, index=True), - Column('enabled', Boolean), - Column('name', Text()), - Column('description', Text()), - Column('timestamp', DateTime(timezone=False)), - Column('counter_name', String(255), index=True), - Column('user_id', String(255), index=True), - Column('project_id', String(255), index=True), - Column('comparison_operator', String(2)), - Column('threshold', Float), - Column('statistic', String(255)), - Column('evaluation_periods', Integer), - Column('period', Integer), - Column('state', String(255)), - Column('state_timestamp', DateTime(timezone=False)), - Column('ok_actions', Text()), - Column('alarm_actions', Text()), - Column('insufficient_data_actions', Text()), - Column('matching_metadata', Text()), - mysql_engine='InnoDB', - mysql_charset='utf8') - alarm.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py deleted file mode 100644 index 68119f4a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from sqlalchemy import Column -from sqlalchemy import Float -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - - unique_name = Table( - 'unique_name', meta, - Column('id', Integer, primary_key=True), - Column('key', String(32), index=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - unique_name.create() - - event = Table( - 'event', meta, - Column('id', Integer, primary_key=True), - Column('generated', Float(asdecimal=True), index=True), - Column('unique_name_id', Integer, ForeignKey('unique_name.id')), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - event.create() - - trait = Table( - 'trait', meta, - Column('id', Integer, primary_key=True), - Column('name_id', Integer, ForeignKey('unique_name.id')), - Column('t_type', Integer, index=True), - Column('t_string', String(32), nullable=True, default=None, - index=True), - Column('t_float', Float, nullable=True, default=None, index=True), - Column('t_int', Integer, nullable=True, default=None, index=True), - Column('t_datetime', Float(asdecimal=True), nullable=True, - default=None, index=True), - Column('event_id', Integer, ForeignKey('event.id')), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - trait.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py deleted file mode 100644 index b02f781a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import VARCHAR - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - name = Table('unique_name', meta, autoload=True) - name.c.key.alter(type=VARCHAR(length=255)) - trait = Table('trait', meta, autoload=True) - trait.c.t_string.alter(type=VARCHAR(length=255)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py deleted file mode 100644 index 1ca58c6f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy as sa - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - index = sa.Index('idx_meter_rid_cname', meter.c.resource_id, - meter.c.counter_name) - index.create(bind=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py deleted file mode 100644 index f5f2728a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py +++ /dev/null @@ -1,37 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Index, MetaData, Table - - -INDEXES = { - # `table_name`: ((`index_name`, `column`),) - "user": (('ix_user_id', 'id'),), - "source": (('ix_source_id', 'id'),), - "project": (('ix_project_id', 'id'),), - "meter": (('ix_meter_id', 'id'),), - "alarm": (('ix_alarm_id', 'id'),), - "resource": (('ix_resource_id', 'id'),) -} - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in INDEXES.keys()) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for index_name, column in indexes: - index = Index(index_name, table.c[column]) - index.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py deleted file mode 100644 index fa77c311..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py +++ /dev/null @@ -1,58 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
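Migrations 010 and 011 manage indexes as standalone DDL objects against reflected tables, the standard pattern in the pre-1.0 SQLAlchemy / sqlalchemy-migrate toolchain this repository targets. The shape of it, as a self-contained sketch:

    import sqlalchemy as sa

    def add_composite_index(migrate_engine):
        # Reflect the live table, then create (or drop) the index by name,
        # exactly as 010 and 011 do.
        meta = sa.MetaData(bind=migrate_engine)
        meter = sa.Table('meter', meta, autoload=True)
        index = sa.Index('idx_meter_rid_cname',
                         meter.c.resource_id, meter.c.counter_name)
        index.create(bind=migrate_engine)  # 011-style cleanup calls .drop()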
- -from migrate import ForeignKeyConstraint -from sqlalchemy import MetaData, Table -from sqlalchemy.sql.expression import select - -TABLES = ['resource', 'sourceassoc', 'user', - 'project', 'meter', 'source', 'alarm'] - -INDEXES = { - "resource": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "sourceassoc": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('resource_id', 'resource', 'id'), - ('meter_id', 'meter', 'id'), - ('source_id', 'source', 'id')), - "alarm": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "meter": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('resource_id', 'resource', 'id'),) -} - - -def upgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = MetaData(bind=migrate_engine) - load_tables = dict((table_name, Table(table_name, meta, autoload=True)) - for table_name in TABLES) - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - subq = select([getattr(ref_table.c, ref_column_name)]) - sql_del = table.delete().where( - ~ getattr(table.c, column).in_(subq)) - migrate_engine.execute(sql_del) - - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', table_name, column)) - fkey = ForeignKeyConstraint(**params) - fkey.create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py deleted file mode 100644 index c35ba173..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - alarm = Table('alarm', meta, autoload=True) - alarm.c.counter_name.alter(name='meter_name') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py deleted file mode 100644 index f3c0c09f..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate.changeset.constraint import UniqueConstraint -import sqlalchemy - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - - event = sqlalchemy.Table('event', meta, autoload=True) - message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50)) - event.create_column(message_id) - - cons = UniqueConstraint('message_id', table=event) - cons.create() - - index = sqlalchemy.Index('idx_event_message_id', event.c.message_id) - index.create(bind=migrate_engine) - - # Populate the new column ... - trait = sqlalchemy.Table('trait', meta, autoload=True) - unique_name = sqlalchemy.Table('unique_name', meta, autoload=True) - join = trait.join(unique_name, unique_name.c.id == trait.c.name_id) - traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string], - whereclause=(unique_name.c.key == 'message_id'), - from_obj=join) - - for event_id, value in traits.execute(): - (event.update().where(event.c.id == event_id).values(message_id=value). - execute()) - - # Leave the Trait, makes the rollback easier and won't really hurt anyone. diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py deleted file mode 100644 index 9a9f07ad..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
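Migration 014 above backfills event.message_id with one UPDATE per event. On engines that support correlated subqueries, the same backfill can be written as a single set-based statement; a hedged sketch of that equivalent formulation (not what the migration did), using the same old-style select([...]) API:

    import sqlalchemy as sa

    def backfill_message_id(migrate_engine):
        meta = sa.MetaData(bind=migrate_engine)
        event = sa.Table('event', meta, autoload=True)
        trait = sa.Table('trait', meta, autoload=True)
        unique_name = sa.Table('unique_name', meta, autoload=True)
        # Correlated subquery: the t_string of this event's
        # 'message_id' trait.
        name_id = sa.select([unique_name.c.id]).where(
            unique_name.c.key == 'message_id').as_scalar()
        subq = sa.select([trait.c.t_string]).where(sa.and_(
            trait.c.event_id == event.c.id,
            trait.c.name_id == name_id)).as_scalar()
        event.update().values(message_id=subq).execute()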
- -from migrate import ForeignKeyConstraint -from sqlalchemy import MetaData, Table, Column, Index -from sqlalchemy import String, DateTime - - -def upgrade(migrate_engine): - meta = MetaData() - meta.bind = migrate_engine - - project = Table('project', meta, autoload=True) - user = Table('user', meta, autoload=True) - - alarm_history = Table( - 'alarm_history', meta, - Column('event_id', String(255), primary_key=True, index=True), - Column('alarm_id', String(255)), - Column('on_behalf_of', String(255)), - Column('project_id', String(255)), - Column('user_id', String(255)), - Column('type', String(20)), - Column('detail', String(255)), - Column('timestamp', DateTime(timezone=False)), - mysql_engine='InnoDB', - mysql_charset='utf8') - - alarm_history.create() - - if migrate_engine.name in ['mysql', 'postgresql']: - indices = [Index('ix_alarm_history_alarm_id', - alarm_history.c.alarm_id), - Index('ix_alarm_history_on_behalf_of', - alarm_history.c.on_behalf_of), - Index('ix_alarm_history_project_id', - alarm_history.c.project_id), - Index('ix_alarm_history_on_user_id', - alarm_history.c.user_id)] - - for index in indices: - index.create(migrate_engine) - - fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of], - refcolumns=[project.c.id]), - ForeignKeyConstraint(columns=[alarm_history.c.project_id], - refcolumns=[project.c.id]), - ForeignKeyConstraint(columns=[alarm_history.c.user_id], - refcolumns=[user.c.id])] - for fkey in fkeys: - fkey.create(engine=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py deleted file mode 100644 index f82ab5ec..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
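Note the guard in 015 above: the secondary indexes and foreign keys are only created on MySQL and PostgreSQL, because SQLite's limited ALTER TABLE cannot add foreign keys to an existing table. Reduced to its skeleton:

    import sqlalchemy as sa

    def upgrade(migrate_engine):
        meta = sa.MetaData(bind=migrate_engine)
        alarm_history = sa.Table('alarm_history', meta, autoload=True)
        if migrate_engine.name in ('mysql', 'postgresql'):
            # DDL that SQLite cannot express in place is simply skipped.
            sa.Index('ix_alarm_history_alarm_id',
                     alarm_history.c.alarm_id).create(migrate_engine)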
-
-import json
-
-from sqlalchemy import MetaData, Table, Column, Index
-from sqlalchemy import String, Text
-
-
-def upgrade(migrate_engine):
-    meta = MetaData()
-    meta.bind = migrate_engine
-    table = Table('alarm', meta, autoload=True)
-
-    type = Column('type', String(50), default='threshold')
-    type.create(table, populate_default=True)
-
-    rule = Column('rule', Text())
-    rule.create(table)
-
-    for row in table.select().execute().fetchall():
-        query = []
-        if row.matching_metadata is not None:
-            matching_metadata = json.loads(row.matching_metadata)
-            for key in matching_metadata:
-                query.append({'field': key,
-                              'op': 'eq',
-                              'value': matching_metadata[key]})
-        rule = {
-            'meter_name': row.meter_name,
-            'comparison_operator': row.comparison_operator,
-            'threshold': row.threshold,
-            'statistic': row.statistic,
-            'evaluation_periods': row.evaluation_periods,
-            'period': row.period,
-            'query': query
-        }
-        table.update().where(table.c.id == row.id).values(
-            rule=json.dumps(rule)).execute()
-
-    index = Index('ix_alarm_counter_name', table.c.meter_name)
-    index.drop(bind=migrate_engine)
-    table.c.meter_name.drop()
-    table.c.comparison_operator.drop()
-    table.c.threshold.drop()
-    table.c.statistic.drop()
-    table.c.evaluation_periods.drop()
-    table.c.period.drop()
-    table.c.matching_metadata.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py
deleted file mode 100644
index f5e58d94..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright 2013 Rackspace Hosting
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-_col = 'timestamp'
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
-    temp_col_n = 'convert_data_type_temp_col'
-    # Override column we're going to convert with from_t, since the type we're
-    # replacing could be custom and we need to tell SQLAlchemy how to perform
-    # CRUD operations with it.
-    table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
-                     extend_existing=True)
-    sa.Column(temp_col_n, to_t).create(table)
-
-    key_attr = getattr(table.c, pk_attr)
-    orig_col = getattr(table.c, col)
-    new_col = getattr(table.c, temp_col_n)
-
-    query = sa.select([key_attr, orig_col])
-    for key, value in migration.paged(query):
-        (table.update().where(key_attr == key).values({temp_col_n: value}).
- execute()) - - orig_col.drop() - new_col.alter(name=col) - if index: - sa.Index('ix_%s_%s' % (table.name, col), new_col).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'mysql': - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table('meter', meta, autoload=True) - _convert_data_type(meter, _col, sa.DateTime(), - models.PreciseTimestamp(), - pk_attr='id', index=True) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py deleted file mode 100644 index 76c1fa2a..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - resource.c.resource_metadata.alter(type=Text) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py deleted file mode 100644 index 539d02fa..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alm_hist = Table('alarm_history', meta, autoload=True) - alm_hist.c.detail.alter(type=Text) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py deleted file mode 100644 index 0748dcff..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py +++ /dev/null @@ -1,68 +0,0 @@ -# -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import json - -import six -from sqlalchemy import Boolean -from sqlalchemy import Column -from sqlalchemy import Float -from sqlalchemy import ForeignKey -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy.sql import select -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import Text - -from ceilometer import utils - -tables = [('metadata_text', Text, True), - ('metadata_bool', Boolean, False), - ('metadata_int', Integer, False), - ('metadata_float', Float, False)] - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - meter = Table('meter', meta, autoload=True) - meta_tables = {} - for t_name, t_type, t_nullable in tables: - meta_tables[t_name] = Table( - t_name, meta, - Column('id', Integer, ForeignKey('meter.id'), primary_key=True), - Column('meta_key', String(255), index=True, primary_key=True), - Column('value', t_type, nullable=t_nullable), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - meta_tables[t_name].create() - - for row in select([meter]).execute(): - if row['resource_metadata']: - meter_id = row['id'] - rmeta = json.loads(row['resource_metadata']) - for key, v in utils.dict_to_keyval(rmeta): - ins = None - if isinstance(v, six.string_types) or v is None: - ins = meta_tables['metadata_text'].insert() - elif isinstance(v, bool): - ins = meta_tables['metadata_bool'].insert() - elif isinstance(v, six.integer_types): - ins = meta_tables['metadata_int'].insert() - elif isinstance(v, float): - ins = meta_tables['metadata_float'].insert() - if ins is not None: - ins.values(id=meter_id, meta_key=key, value=v).execute() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py deleted file mode 100644 index 056f3f5c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
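Migration 020 above flattens nested resource metadata with ceilometer.utils.dict_to_keyval before routing each leaf value to a typed table. That helper is outside this patch; the behaviour the migration appears to rely on, dotted keys emitted depth-first, would look roughly like this (an assumption, not the removed implementation):

    import six  # the migration itself imports six

    def dict_to_keyval(value, key_base=''):
        # Yield ('a.b.c', leaf) pairs for a nested dict; a guess at the
        # contract that 020 depends on.
        for key, item in six.iteritems(value):
            full_key = '%s.%s' % (key_base, key) if key_base else key
            if isinstance(item, dict):
                for pair in dict_to_keyval(item, full_key):
                    yield pair
            else:
                yield full_key, item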
-from migrate import ForeignKeyConstraint -from sqlalchemy import Column -from sqlalchemy import Integer -from sqlalchemy import MetaData -from sqlalchemy import select -from sqlalchemy import String -from sqlalchemy import Table - -from ceilometer.storage.sqlalchemy import migration - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - event_type = Table( - 'event_type', meta, - Column('id', Integer, primary_key=True), - Column('desc', String(255), unique=True), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - event_type.create() - event = Table('event', meta, autoload=True) - unique_name = Table('unique_name', meta, autoload=True) - - # Event type is a specialization of Unique name, so - # we insert into the event_type table all the distinct - # unique names from the event.unique_name field along - # with the key from the unique_name table, and - # then rename the event.unique_name field to event.event_type - conn = migrate_engine.connect() - sql = ("INSERT INTO event_type " - "SELECT unique_name.id, unique_name.key FROM event " - "INNER JOIN unique_name " - "ON event.unique_name_id = unique_name.id " - "GROUP BY unique_name.id") - conn.execute(sql) - conn.close() - # Now we need to drop the foreign key constraint, rename - # the event.unique_name column, and re-add a new foreign - # key constraint - params = {'columns': [event.c.unique_name_id], - 'refcolumns': [unique_name.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "event_ibfk_1" - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - Column('event_type_id', Integer).create(event) - - # Move data from unique_name_id column into event_type_id column - # and delete the entry from the unique_name table - query = select([event.c.id, event.c.unique_name_id]) - for key, value in migration.paged(query): - (event.update().where(event.c.id == key). 
- values({"event_type_id": value}).execute()) - unique_name.delete().where(unique_name.c.id == key).execute() - - params = {'columns': [event.c.event_type_id], - 'refcolumns': [event_type.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = "_".join(('fk', 'event_type', 'id')) - fkey = ForeignKeyConstraint(**params) - fkey.create() - - event.c.unique_name_id.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql deleted file mode 100644 index 19030113..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql +++ /dev/null @@ -1,29 +0,0 @@ -CREATE TABLE event_type ( - id INTEGER PRIMARY KEY ASC, - desc STRING NOT NULL -); - -INSERT INTO event_type -SELECT un.id, un.key -FROM unique_name un -JOIN event e ON un.id = e.unique_name_id -GROUP BY un.id; - -ALTER TABLE event RENAME TO event_orig; - -CREATE TABLE event ( - id INTEGER PRIMARY KEY ASC, - generated FLOAT NOT NULL, - message_id VARCHAR(50) UNIQUE, - event_type_id INTEGER NOT NULL, - FOREIGN KEY (event_type_id) REFERENCES event_type (id) -); - -INSERT INTO event -SELECT id, generated, message_id, unique_name_id -FROM event_orig; - -DROP TABLE event_orig; - -DELETE FROM unique_name -WHERE id IN (SELECT id FROM event_type); diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py deleted file mode 100644 index ebbb6e0c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py +++ /dev/null @@ -1,26 +0,0 @@ - -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import BigInteger -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('metadata_int', meta, autoload=True) - resource.c.value.alter(type=BigInteger) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py deleted file mode 100644 index 23c864bc..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Column
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import select
-from sqlalchemy import String
-from sqlalchemy import Table
-from sqlalchemy import UniqueConstraint
-
-from ceilometer.storage.sqlalchemy import migration
-
-
-def upgrade(migrate_engine):
-    meta = MetaData(migrate_engine)
-    trait_type = Table(
-        'trait_type', meta,
-        Column('id', Integer, primary_key=True),
-        Column('desc', String(255)),
-        Column('data_type', Integer),
-        UniqueConstraint('desc', 'data_type', name="tt_unique"),
-        mysql_engine='InnoDB',
-        mysql_charset='utf8',
-    )
-    trait = Table('trait', meta, autoload=True)
-    unique_name = Table('unique_name', meta, autoload=True)
-    trait_type.create(migrate_engine)
-    # Trait type extracts data from Trait and Unique name: all trait
-    # names come from Unique Name, and the data types from Trait. We
-    # can then drop the name_id and t_type columns from trait.
-
-    conn = migrate_engine.connect()
-    sql = ("INSERT INTO trait_type "
-           "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait "
-           "INNER JOIN unique_name "
-           "ON trait.name_id = unique_name.id "
-           "GROUP BY unique_name.id, unique_name.key, trait.t_type")
-    conn.execute(sql)
-    conn.close()
-
-    # Now we need to drop the foreign key constraint, move the data out
-    # of the trait.name_id column, and re-add a new foreign
-    # key constraint
-    params = {'columns': [trait.c.name_id],
-              'refcolumns': [unique_name.c.id]}
-    if migrate_engine.name == 'mysql':
-        params['name'] = "trait_ibfk_1"  # foreign key to the unique name table
-    fkey = ForeignKeyConstraint(**params)
-    fkey.drop()
-
-    Column('trait_type_id', Integer).create(trait)
-
-    # Move data from name_id column into trait_type_id column
-    query = select([trait.c.id, trait.c.name_id])
-    for key, value in migration.paged(query):
-        (trait.update().where(trait.c.id == key).
-         values({"trait_type_id": value}).execute())
-
-    trait.c.name_id.drop()
-
-    params = {'columns': [trait.c.trait_type_id],
-              'refcolumns': [trait_type.c.id]}
-    if migrate_engine.name == 'mysql':
-        params['name'] = "_".join(('fk', 'trait_type', 'id'))
-
-    fkey = ForeignKeyConstraint(**params)
-    fkey.create()
-
-    # Drop the t_type column; its values now live in trait_type.data_type.
-    trait.c.t_type.drop()
-
-    # Finally, drop the unique_name table - we don't need it
-    # anymore.
-    unique_name.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql
deleted file mode 100644
index ac4dfc7f..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-ALTER TABLE trait RENAME TO trait_orig;
-
-CREATE TABLE trait_type (
-    id INTEGER PRIMARY KEY ASC,
-    'desc' STRING NOT NULL,
-    data_type INTEGER NOT NULL,
-    UNIQUE ('desc', data_type)
-);
-
-INSERT INTO trait_type
-SELECT un.id, un.key, t.t_type
-FROM unique_name un
-JOIN trait_orig t ON un.id = t.name_id
-GROUP BY un.id;
-
-CREATE TABLE trait (
-    id INTEGER PRIMARY KEY ASC,
-    t_string VARCHAR(255),
-    t_int INTEGER,
-    t_float FLOAT,
-    t_datetime FLOAT,
-    trait_type_id INTEGER NOT NULL,
-    event_id INTEGER NOT NULL,
-    FOREIGN KEY (trait_type_id) REFERENCES trait_type (id),
-    FOREIGN KEY (event_id) REFERENCES event (id)
-);
-
-INSERT INTO trait
-SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime, t.name_id,
-       t.event_id
-FROM trait_orig t;
-
-DROP TABLE trait_orig;
-DROP TABLE unique_name;
\ No newline at end of file
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
deleted file mode 100644
index e97f24bb..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Copyright 2013 eNovance SAS
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
-    temp_col_n = 'convert_data_type_temp_col'
-    # Override column we're going to convert with from_t, since the type we're
-    # replacing could be custom and we need to tell SQLAlchemy how to perform
-    # CRUD operations with it.
-    table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
-                     extend_existing=True)
-    sa.Column(temp_col_n, to_t).create(table)
-
-    key_attr = getattr(table.c, pk_attr)
-    orig_col = getattr(table.c, col)
-    new_col = getattr(table.c, temp_col_n)
-
-    query = sa.select([key_attr, orig_col])
-    for key, value in migration.paged(query):
-        (table.update().where(key_attr == key).values({temp_col_n: value}).
-         execute())
-
-    orig_col.drop()
-    new_col.alter(name=col)
-    if index:
-        sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
-
-
-def upgrade(migrate_engine):
-    if migrate_engine.name == 'mysql':
-        meta = sa.MetaData(bind=migrate_engine)
-        event = sa.Table('event', meta, autoload=True)
-        _convert_data_type(event, 'generated', sa.Float(),
-                           models.PreciseTimestamp(),
-                           pk_attr='id', index=True)
-        trait = sa.Table('trait', meta, autoload=True)
-        _convert_data_type(trait, 't_datetime', sa.Float(),
-                           models.PreciseTimestamp(),
-                           pk_attr='id', index=True)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py
deleted file mode 100644
index 457a9fd5..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright 2013 eNovance SAS
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id'):
-    temp_col_n = 'convert_data_type_temp_col'
-    # Override column we're going to convert with from_t, since the type we're
-    # replacing could be custom and we need to tell SQLAlchemy how to perform
-    # CRUD operations with it.
-    table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
-                     extend_existing=True)
-    sa.Column(temp_col_n, to_t).create(table)
-
-    key_attr = getattr(table.c, pk_attr)
-    orig_col = getattr(table.c, col)
-    new_col = getattr(table.c, temp_col_n)
-
-    query = sa.select([key_attr, orig_col])
-    for key, value in migration.paged(query):
-        (table.update().where(key_attr == key).values({temp_col_n: value}).
-         execute())
-
-    orig_col.drop()
-    new_col.alter(name=col)
-
-
-to_convert = [
-    ('alarm', 'timestamp', 'id'),
-    ('alarm', 'state_timestamp', 'id'),
-    ('alarm_history', 'timestamp', 'alarm_id'),
-]
-
-
-def upgrade(migrate_engine):
-    if migrate_engine.name == 'mysql':
-        meta = sa.MetaData(bind=migrate_engine)
-        for table_name, col_name, pk_attr in to_convert:
-            table = sa.Table(table_name, meta, autoload=True)
-            _convert_data_type(table, col_name, sa.DateTime(),
-                               models.PreciseTimestamp(),
-                               pk_attr=pk_attr)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py
deleted file mode 100644
index 959c1fb6..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Float
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
-    meta = MetaData(bind=migrate_engine)
-    metadata_float = Table('metadata_float', meta, autoload=True)
-    metadata_float.c.value.alter(type=Float(53))
-    trait = Table('trait', meta, autoload=True)
-    trait.c.t_float.alter(type=Float(53))
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py
deleted file mode 100644
index 98377628..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Copyright 2014 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table
-
-TABLES = ['user', 'project', 'alarm']
-
-INDEXES = {
-    "alarm": (('user_id', 'user', 'id'),
-              ('project_id', 'project', 'id')),
-}
-
-
-def upgrade(migrate_engine):
-    if migrate_engine.name == 'sqlite':
-        return
-    meta = MetaData(bind=migrate_engine)
-    load_tables = dict((table_name, Table(table_name, meta, autoload=True))
-                       for table_name in TABLES)
-    for table_name, indexes in INDEXES.items():
-        table = load_tables[table_name]
-        for column, ref_table_name, ref_column_name in indexes:
-            ref_table = load_tables[ref_table_name]
-            params = {'columns': [table.c[column]],
-                      'refcolumns': [ref_table.c[ref_column_name]]}
-            if migrate_engine.name == 'mysql':
-                params['name'] = "_".join(('fk', table_name, column))
-            fkey = ForeignKeyConstraint(**params)
-            fkey.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py
deleted file mode 100644
index 1778a0b2..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
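Scripts 012 and 027 share a recurring detail: MySQL can only drop a foreign key by its constraint name, so the scripts reconstruct the conventional fk_<table>_<column> name before calling drop(), while other engines resolve the constraint from its column pair. Factored out as a sketch over the same migrate API:

    from migrate import ForeignKeyConstraint
    import sqlalchemy as sa

    def drop_fk(migrate_engine, table_name, column,
                ref_table_name, ref_column='id'):
        meta = sa.MetaData(bind=migrate_engine)
        table = sa.Table(table_name, meta, autoload=True)
        ref_table = sa.Table(ref_table_name, meta, autoload=True)
        params = {'columns': [table.c[column]],
                  'refcolumns': [ref_table.c[ref_column]]}
        if migrate_engine.name == 'mysql':
            # MySQL drops foreign keys by name only.
            params['name'] = '_'.join(('fk', table_name, column))
        ForeignKeyConstraint(**params).drop()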
-import migrate
-import sqlalchemy as sa
-
-
-def get_alembic_version(meta):
-    """Return Alembic version or None if no Alembic table exists."""
-    try:
-        a_ver = sa.Table(
-            'alembic_version',
-            meta,
-            autoload=True)
-        return sa.select([a_ver.c.version_num]).scalar()
-    except sa.exc.NoSuchTableError:
-        return None
-
-
-def delete_alembic(meta):
-    try:
-        sa.Table(
-            'alembic_version',
-            meta,
-            autoload=True).drop(checkfirst=True)
-    except sa.exc.NoSuchTableError:
-        pass
-
-
-INDEXES = (
-    # ([dialects], table_name, index_name, columns, create, unique, limited)
-    (['mysql', 'sqlite', 'postgresql'],
-     'resource',
-     'resource_user_id_project_id_key',
-     ('user_id', 'project_id'), True, False, True),
-    (['mysql'], 'source', 'id', ('id',), False, True, False))
-
-
-def index_cleanup(meta, table_name, uniq_name, columns,
-                  create, unique, limited):
-    table = sa.Table(table_name, meta, autoload=True)
-    if create:
-        if limited and meta.bind.engine.name == 'mysql':
-            # Some versions of MySQL raise "Specified key was too long;
-            # max key length is 1000 bytes", so in that case we create
-            # the index by hand with a limited prefix length per column.
-            columns_mysql = ",".join((c + "(100)" for c in columns))
-            sql = ("create index %s ON %s (%s)" % (uniq_name, table,
-                                                   columns_mysql))
-            meta.bind.engine.execute(sql)
-        else:
-            cols = [table.c[col] for col in columns]
-            sa.Index(uniq_name, *cols, unique=unique).create()
-    else:
-        if unique:
-            migrate.UniqueConstraint(*columns, table=table,
-                                     name=uniq_name).drop()
-        else:
-            cols = [table.c[col] for col in columns]
-            sa.Index(uniq_name, *cols).drop()
-
-
-def change_uniq(meta):
-    uniq_name = 'uniq_sourceassoc0meter_id0user_id'
-    columns = ('meter_id', 'user_id')
-
-    if meta.bind.engine.name == 'sqlite':
-        return
-
-    sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
-    meter = sa.Table('meter', meta, autoload=True)
-    user = sa.Table('user', meta, autoload=True)
-    if meta.bind.engine.name == 'mysql':
-        # On the MySQL dialect, all dependent FKs must be dropped
-        # before the constraint is renamed.
- params = {'columns': [sourceassoc.c.meter_id], - 'refcolumns': [meter.c.id], - 'name': 'fk_sourceassoc_meter_id'} - migrate.ForeignKeyConstraint(**params).drop() - params = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id], - 'name': 'fk_sourceassoc_user_id'} - migrate.ForeignKeyConstraint(**params).drop() - - migrate.UniqueConstraint(*columns, table=sourceassoc, - name=uniq_name).create() - if meta.bind.engine.name == 'mysql': - params = {'columns': [sourceassoc.c.meter_id], - 'refcolumns': [meter.c.id], - 'name': 'fk_sourceassoc_meter_id'} - migrate.ForeignKeyConstraint(**params).create() - params = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id], - 'name': 'fk_sourceassoc_user_id'} - migrate.ForeignKeyConstraint(**params).create() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - a_ver = get_alembic_version(meta) - - if not a_ver: - alarm = sa.Table('alarm', meta, autoload=True) - repeat_act = sa.Column('repeat_actions', sa.Boolean, - server_default=sa.sql.expression.false()) - alarm.create_column(repeat_act) - a_ver = '43b1a023dfaa' - - if a_ver == '43b1a023dfaa': - meter = sa.Table('meter', meta, autoload=True) - meter.c.resource_metadata.alter(type=sa.Text) - a_ver = '17738166b91' - - if a_ver == '17738166b91': - for (engine_names, table_name, uniq_name, - columns, create, uniq, limited) in INDEXES: - if migrate_engine.name in engine_names: - index_cleanup(meta, table_name, uniq_name, - columns, create, uniq, limited) - a_ver = 'b6ae66d05e3' - - if a_ver == 'b6ae66d05e3': - change_uniq(meta) - - delete_alembic(meta) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py deleted file mode 100644 index 0c692bfa..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_utils import timeutils -import sqlalchemy - -from ceilometer.storage.sqlalchemy import models - - -def upgrade(migrate_engine): - meta = sqlalchemy.MetaData(bind=migrate_engine) - meter = sqlalchemy.Table('meter', meta, autoload=True) - c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(), - default=timeutils.utcnow) - meter.create_column(c) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py deleted file mode 100644 index 39ecf057..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py +++ /dev/null @@ -1,110 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import migrate -import sqlalchemy as sa - - -def _handle_meter_indices(meta): - if meta.bind.engine.name == 'sqlite': - return - - resource = sa.Table('resource', meta, autoload=True) - project = sa.Table('project', meta, autoload=True) - user = sa.Table('user', meta, autoload=True) - meter = sa.Table('meter', meta, autoload=True) - - indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp), - sa.Index('ix_sample_timestamp', meter.c.timestamp)), - (sa.Index('ix_meter_user_id', meter.c.user_id), - sa.Index('ix_sample_user_id', meter.c.user_id)), - (sa.Index('ix_meter_project_id', meter.c.project_id), - sa.Index('ix_sample_project_id', meter.c.project_id)), - (sa.Index('idx_meter_rid_cname', meter.c.resource_id, - meter.c.counter_name), - sa.Index('idx_sample_rid_cname', meter.c.resource_id, - meter.c.counter_name))] - - fk_params = [({'columns': [meter.c.resource_id], - 'refcolumns': [resource.c.id]}, - 'fk_meter_resource_id', - 'fk_sample_resource_id'), - ({'columns': [meter.c.project_id], - 'refcolumns': [project.c.id]}, - 'fk_meter_project_id', - 'fk_sample_project_id'), - ({'columns': [meter.c.user_id], - 'refcolumns': [user.c.id]}, - 'fk_meter_user_id', - 'fk_sample_user_id')] - - for fk in fk_params: - params = fk[0] - if meta.bind.engine.name == 'mysql': - params['name'] = fk[1] - migrate.ForeignKeyConstraint(**params).drop() - - for meter_ix, sample_ix in indices: - meter_ix.drop() - sample_ix.create() - - for fk in fk_params: - params = fk[0] - if meta.bind.engine.name == 'mysql': - params['name'] = fk[2] - migrate.ForeignKeyConstraint(**params).create() - - -def _alter_sourceassoc(meta, t_name, ix_name, post_action=False): - if meta.bind.engine.name == 'sqlite': - return - - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - table = sa.Table(t_name, meta, autoload=True) - user = sa.Table('user', meta, autoload=True) - - c_name = '%s_id' % t_name - col = getattr(sourceassoc.c, c_name) - uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name - - uniq_cols = (c_name, 'user_id') - param = {'columns': [col], - 'refcolumns': [table.c.id]} - user_param = {'columns': [sourceassoc.c.user_id], - 'refcolumns': [user.c.id]} - if meta.bind.engine.name == 'mysql': - param['name'] = 'fk_sourceassoc_%s' % c_name - user_param['name'] = 'fk_sourceassoc_user_id' - - actions = [migrate.ForeignKeyConstraint(**user_param), - migrate.ForeignKeyConstraint(**param), - sa.Index(ix_name, sourceassoc.c.source_id, col), - migrate.UniqueConstraint(*uniq_cols, table=sourceassoc, - name=uniq_name)] - for action in actions: - action.create() if post_action else action.drop() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - - _handle_meter_indices(meta) - meter = sa.Table('meter', meta, autoload=True) - meter.rename('sample') - - _alter_sourceassoc(meta, 'meter', 'idx_sm') - sourceassoc = sa.Table('sourceassoc', meta, autoload=True) - sourceassoc.c.meter_id.alter(name='sample_id') - # re-bind metadata to pick up alter name change - meta = sa.MetaData(bind=migrate_engine) - _alter_sourceassoc(meta, 'sample', 'idx_ss', True) diff --git 
a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py deleted file mode 100644 index 3dd8e469..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import migrate -import sqlalchemy as sa - - -def handle_rid_index(meta): - if meta.bind.engine.name == 'sqlite': - return - - resource = sa.Table('resource', meta, autoload=True) - sample = sa.Table('sample', meta, autoload=True) - params = {'columns': [sample.c.resource_id], - 'refcolumns': [resource.c.id], - 'name': 'fk_sample_resource_id'} - if meta.bind.engine.name == 'mysql': - # For mysql dialect all dependent FK should be removed - # before index create/delete - migrate.ForeignKeyConstraint(**params).drop() - - index = sa.Index('idx_sample_rid_cname', sample.c.resource_id, - sample.c.counter_name) - index.drop() - - if meta.bind.engine.name == 'mysql': - migrate.ForeignKeyConstraint(**params).create() - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - meter = sa.Table( - 'meter', meta, - sa.Column('id', sa.Integer, primary_key=True), - sa.Column('name', sa.String(255), nullable=False), - sa.Column('type', sa.String(255)), - sa.Column('unit', sa.String(255)), - sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - meter.create() - sample = sa.Table('sample', meta, autoload=True) - query = sa.select([sample.c.counter_name, sample.c.counter_type, - sample.c.counter_unit]).distinct() - for row in query.execute(): - meter.insert().values(name=row['counter_name'], - type=row['counter_type'], - unit=row['counter_unit']).execute() - - meter_id = sa.Column('meter_id', sa.Integer) - meter_id.create(sample) - params = {'columns': [sample.c.meter_id], - 'refcolumns': [meter.c.id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_meter_id' - if migrate_engine.name != 'sqlite': - migrate.ForeignKeyConstraint(**params).create() - - index = sa.Index('ix_meter_name', meter.c.name) - index.create(bind=migrate_engine) - - for row in sa.select([meter]).execute(): - (sample.update(). - where(sa.and_(sample.c.counter_name == row['name'], - sample.c.counter_type == row['type'], - sample.c.counter_unit == row['unit'])). 
- values({sample.c.meter_id: row['id']}).execute()) - - handle_rid_index(meta) - - sample.c.counter_name.drop() - sample.c.counter_type.drop() - sample.c.counter_unit.drop() - sample.c.counter_volume.alter(name='volume') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py deleted file mode 100644 index ec0b537c..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import Table -from sqlalchemy import Text - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - time_constraints = Column('time_constraints', Text()) - alarm.create_column(time_constraints) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py deleted file mode 100644 index bb0264eb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - users = Table('alarm', meta, autoload=True) - users.c.id.alter(name='alarm_id') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py deleted file mode 100644 index ba4e3160..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
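One subtlety running through 030 and 033 above: after renaming a table or column with .rename(...) or .alter(name=...), Table objects reflected earlier are stale, which is why 030 rebuilds its MetaData before the second _alter_sourceassoc pass. In miniature, against the 033 rename:

    import sqlalchemy as sa

    def rename_and_refresh(migrate_engine):
        meta = sa.MetaData(bind=migrate_engine)
        alarm = sa.Table('alarm', meta, autoload=True)
        alarm.c.id.alter(name='alarm_id')  # what 033 does
        # The old `meta` still reports a column named 'id'; re-reflect
        # to pick up the rename before issuing any further DDL.
        meta = sa.MetaData(bind=migrate_engine)
        alarm = sa.Table('alarm', meta, autoload=True)
        assert 'alarm_id' in alarm.c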
-import sqlalchemy as sa - -TABLES_012 = ['resource', 'sourceassoc', 'user', - 'project', 'meter', 'source', 'alarm'] -TABLES_027 = ['user', 'project', 'alarm'] - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - for table_name in TABLES_027: - try: - (sa.Table('dump027_' + table_name, meta, autoload=True). - drop(checkfirst=True)) - except sa.exc.NoSuchTableError: - pass - for table_name in TABLES_012: - try: - (sa.Table('dump_' + table_name, meta, autoload=True). - drop(checkfirst=True)) - except sa.exc.NoSuchTableError: - pass diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py deleted file mode 100644 index e58915af..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py +++ /dev/null @@ -1,84 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate import ForeignKeyConstraint, UniqueConstraint -import sqlalchemy as sa - -TABLES_DROP = ['user', 'project'] -TABLES = ['user', 'project', 'sourceassoc', 'sample', - 'resource', 'alarm_history'] - -INDEXES = { - "sample": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "sourceassoc": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "resource": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id')), - "alarm_history": (('user_id', 'user', 'id'), - ('project_id', 'project', 'id'), - ('on_behalf_of', 'project', 'id')), -} - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - load_tables = dict((table_name, sa.Table(table_name, meta, - autoload=True)) - for table_name in TABLES) - - if migrate_engine.name != 'sqlite': - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - - if (migrate_engine.name == "mysql" and - table_name != 'alarm_history'): - params['name'] = "_".join(('fk', table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == "sample"): - # The fk contains the old table name - params['name'] = "_".join(('meter', column, 'fkey')) - - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - sourceassoc = load_tables['sourceassoc'] - if migrate_engine.name != 'sqlite': - idx = sa.Index('idx_su', sourceassoc.c.source_id, - sourceassoc.c.user_id) - idx.drop(bind=migrate_engine) - idx = sa.Index('idx_sp', sourceassoc.c.source_id, - sourceassoc.c.project_id) - idx.drop(bind=migrate_engine) - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id'} - uc = UniqueConstraint('sample_id', table=sourceassoc, **params) - uc.create() - - params = {} - if migrate_engine.name == "mysql": - params = {'name': 'uniq_sourceassoc0sample_id0user_id'} - uc = UniqueConstraint('sample_id', 'user_id', - 
table=sourceassoc, **params) - uc.drop() - sourceassoc.c.user_id.drop() - sourceassoc.c.project_id.drop() - - for table_name in TABLES_DROP: - sa.Table(table_name, meta, autoload=True).drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py deleted file mode 100644 index b8a1a3db..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py +++ /dev/null @@ -1,68 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from migrate import ForeignKeyConstraint -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration - - -TABLES = ['sample', 'resource', 'source', 'sourceassoc'] -DROP_TABLES = ['resource', 'source', 'sourceassoc'] - -INDEXES = { - "sample": (('resource_id', 'resource', 'id'),), - "sourceassoc": (('sample_id', 'sample', 'id'), - ('resource_id', 'resource', 'id'), - ('source_id', 'source', 'id')) -} - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - load_tables = dict((table_name, sa.Table(table_name, meta, - autoload=True)) - for table_name in TABLES) - - # drop foreign keys - if migrate_engine.name != 'sqlite': - for table_name, indexes in INDEXES.items(): - table = load_tables[table_name] - for column, ref_table_name, ref_column_name in indexes: - ref_table = load_tables[ref_table_name] - params = {'columns': [table.c[column]], - 'refcolumns': [ref_table.c[ref_column_name]]} - fk_table_name = table_name - if migrate_engine.name == "mysql": - params['name'] = "_".join(('fk', fk_table_name, column)) - elif (migrate_engine.name == "postgresql" and - table_name == 'sample'): - # fk was not renamed in script 030 - params['name'] = "_".join(('meter', column, 'fkey')) - fkey = ForeignKeyConstraint(**params) - fkey.drop() - - # create source field in sample - sample = load_tables['sample'] - sample.create_column(sa.Column('source_id', sa.String(255))) - - # move source values to samples - sourceassoc = load_tables['sourceassoc'] - query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]). - where(sourceassoc.c.sample_id.isnot(None))) - for sample_id, source_id in migration.paged(query): - (sample.update().where(sample_id == sample.c.id). - values({'source_id': source_id}).execute()) - - # drop tables - for table_name in DROP_TABLES: - sa.Table(table_name, meta, autoload=True).drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py deleted file mode 100644 index 18ee7a67..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
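[annotation] A recurring wrinkle in scripts 035 and 036 above: the foreign-key name to drop differs per backend, since MySQL carries explicit fk_<table>_<column> names while the PostgreSQL constraint on sample kept its pre-rename meter_* name from script 030. Distilled into a helper (a sketch using the same migrate API as the scripts):

    from migrate import ForeignKeyConstraint


    def drop_fk(engine, table, ref_table, column, ref_column):
        params = {'columns': [table.c[column]],
                  'refcolumns': [ref_table.c[ref_column]]}
        if engine.name == 'mysql':
            params['name'] = '_'.join(('fk', table.name, column))
        elif engine.name == 'postgresql' and table.name == 'sample':
            # the constraint was never renamed when 'meter' became 'sample'
            params['name'] = '_'.join(('meter', column, 'fkey'))
        ForeignKeyConstraint(**params).drop()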
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from migrate import ForeignKeyConstraint -import sqlalchemy as sa - - -class ForeignKeyHandle(object): - def __init__(self, meta): - sample = sa.Table('sample', meta, autoload=True) - meter = sa.Table('meter', meta, autoload=True) - self.sample_params = {'columns': [sample.c.meter_id], - 'refcolumns': [meter.c.id]} - if meta.bind.engine.name == 'mysql': - self.sample_params['name'] = "fk_sample_meter_id" - - def __enter__(self): - ForeignKeyConstraint(**self.sample_params).drop() - - def __exit__(self, type, value, traceback): - ForeignKeyConstraint(**self.sample_params).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'sqlite': - return - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - - with ForeignKeyHandle(meta): - # remove stray indexes implicitly created by InnoDB - for index in sample.indexes: - if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']: - index.drop() - sa.Index('ix_sample_meter_id', sample.c.meter_id).create() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py deleted file mode 100644 index 2fb7b47b..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py +++ /dev/null @@ -1,131 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
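[annotation] ForeignKeyHandle in script 037 above is worth noting: InnoDB will not drop an index that still backs a foreign key, so the script drops the constraint on __enter__, rebuilds indexes inside the with-block, and recreates the constraint on __exit__ even if the body raised. The same pattern in generic form (a sketch; params is the columns/refcolumns dict built as in the script):

    from migrate import ForeignKeyConstraint


    class DroppedForeignKey(object):
        """Temporarily remove an FK so dependent indexes can be rebuilt."""

        def __init__(self, params):
            # params: {'columns': [...], 'refcolumns': [...], 'name': ...}
            self.params = params

        def __enter__(self):
            ForeignKeyConstraint(**self.params).drop()

        def __exit__(self, exc_type, exc_val, exc_tb):
            # recreate unconditionally so the schema is left consistent
            ForeignKeyConstraint(**self.params).create()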
-import hashlib - -import migrate -from oslo_serialization import jsonutils -import sqlalchemy as sa - - -m_tables = [('metadata_text', sa.Text, True), - ('metadata_bool', sa.Boolean, False), - ('metadata_int', sa.BigInteger, False), - ('metadata_float', sa.Float(53), False)] - - -def _migrate_meta_tables(meta, col, new_col, new_fk): - for t_name, t_type, t_nullable in m_tables: - m_table = sa.Table(t_name, meta, autoload=True) - m_table_new = sa.Table( - '%s_new' % t_name, meta, - sa.Column('id', sa.Integer, sa.ForeignKey(new_fk), - primary_key=True), - sa.Column('meta_key', sa.String(255), - primary_key=True), - sa.Column('value', t_type, nullable=t_nullable), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - m_table_new.create() - - if m_table.select().scalar() is not None: - m_table_new.insert().from_select( - ['id', 'meta_key', 'value'], - sa.select([new_col, m_table.c.meta_key, - m_table.c.value]).where( - col == m_table.c.id).group_by( - new_col, m_table.c.meta_key, m_table.c.value)).execute() - - m_table.drop() - if meta.bind.engine.name != 'sqlite': - sa.Index('ix_%s_meta_key' % t_name, - m_table_new.c.meta_key).create() - m_table_new.rename(t_name) - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - resource = sa.Table( - 'resource', meta, - sa.Column('internal_id', sa.Integer, primary_key=True), - sa.Column('resource_id', sa.String(255)), - sa.Column('user_id', sa.String(255)), - sa.Column('project_id', sa.String(255)), - sa.Column('source_id', sa.String(255)), - sa.Column('resource_metadata', sa.Text), - sa.Column('metadata_hash', sa.String(32)), - mysql_engine='InnoDB', - mysql_charset='utf8') - resource.create() - - # copy resource data in to resource table - sample = sa.Table('sample', meta, autoload=True) - sa.Column('metadata_hash', sa.String(32)).create(sample) - for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute(): - sample.update().where(sample.c.id == row['id']).values( - {sample.c.metadata_hash: - hashlib.md5(jsonutils.dumps( - row['resource_metadata'], - sort_keys=True)).hexdigest()}).execute() - query = sa.select([sample.c.resource_id, sample.c.user_id, - sample.c.project_id, sample.c.source_id, - sample.c.resource_metadata, - sample.c.metadata_hash]).distinct() - for row in query.execute(): - resource.insert().values( - resource_id=row['resource_id'], - user_id=row['user_id'], - project_id=row['project_id'], - source_id=row['source_id'], - resource_metadata=row['resource_metadata'], - metadata_hash=row['metadata_hash']).execute() - # link sample records to new resource records - sa.Column('resource_id_new', sa.Integer).create(sample) - for row in sa.select([resource]).execute(): - (sample.update(). - where(sa.and_( - sample.c.resource_id == row['resource_id'], - sample.c.user_id == row['user_id'], - sample.c.project_id == row['project_id'], - sample.c.source_id == row['source_id'], - sample.c.metadata_hash == row['metadata_hash'])). 
- values({sample.c.resource_id_new: row['internal_id']}).execute()) - - sample.c.resource_id.drop() - sample.c.metadata_hash.drop() - sample.c.resource_id_new.alter(name='resource_id') - # re-bind metadata to pick up alter name change - meta = sa.MetaData(bind=migrate_engine) - sample = sa.Table('sample', meta, autoload=True) - resource = sa.Table('resource', meta, autoload=True) - if migrate_engine.name != 'sqlite': - sa.Index('ix_resource_resource_id', resource.c.resource_id).create() - sa.Index('ix_sample_user_id', sample.c.user_id).drop() - sa.Index('ix_sample_project_id', sample.c.project_id).drop() - sa.Index('ix_sample_resource_id', sample.c.resource_id).create() - sa.Index('ix_sample_meter_id_resource_id', - sample.c.meter_id, sample.c.resource_id).create() - - params = {'columns': [sample.c.resource_id], - 'refcolumns': [resource.c.internal_id]} - if migrate_engine.name == 'mysql': - params['name'] = 'fk_sample_resource_internal_id' - migrate.ForeignKeyConstraint(**params).create() - - sample.c.user_id.drop() - sample.c.project_id.drop() - sample.c.source_id.drop() - sample.c.resource_metadata.drop() - - _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id, - 'resource.internal_id') diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py deleted file mode 100644 index 055f2ee6..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py +++ /dev/null @@ -1,56 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# NOTE (gordc): this is a copy of 024 migration script which missed pgsql - -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import migration -from ceilometer.storage.sqlalchemy import models - - -def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False): - temp_col_n = 'convert_data_type_temp_col' - # Override column we're going to convert with from_t, since the type we're - # replacing could be custom and we need to tell SQLALchemy how to perform - # CRUD operations with it. - table = sa.Table(table.name, table.metadata, sa.Column(col, from_t), - extend_existing=True) - sa.Column(temp_col_n, to_t).create(table) - - key_attr = getattr(table.c, pk_attr) - orig_col = getattr(table.c, col) - new_col = getattr(table.c, temp_col_n) - - query = sa.select([key_attr, orig_col]) - for key, value in migration.paged(query): - (table.update().where(key_attr == key).values({temp_col_n: value}). 
- execute()) - - orig_col.drop() - new_col.alter(name=col) - if index: - sa.Index('ix_%s_%s' % (table.name, col), new_col).create() - - -def upgrade(migrate_engine): - if migrate_engine.name == 'postgresql': - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - _convert_data_type(event, 'generated', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) - trait = sa.Table('trait', meta, autoload=True) - _convert_data_type(trait, 't_datetime', sa.Float(), - models.PreciseTimestamp(), - pk_attr='id', index=True) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py deleted file mode 100644 index 07a94deb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - alarm = Table('alarm', meta, autoload=True) - severity = Column('severity', String(50)) - alarm.create_column(severity) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py deleted file mode 100644 index a9492381..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py +++ /dev/null @@ -1,54 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
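[annotation] _convert_data_type in script 039 above is the portable way to change a column's type when ALTER COLUMN cannot express the conversion (here, float epoch values into a custom PreciseTimestamp): add a temporary column of the target type, copy values in pages, drop the original, rename the temp back. Condensed into a sketch that reuses the repository's paged() helper and the migrate column DDL API:

    import sqlalchemy as sa

    from ceilometer.storage.sqlalchemy import migration


    def convert_column(table, col_name, new_type, pk='id'):
        tmp_name = 'convert_tmp_' + col_name
        sa.Column(tmp_name, new_type).create(table)
        key, old = table.c[pk], table.c[col_name]
        # copy window-by-window so large tables are never held in memory
        for k, value in migration.paged(sa.select([key, old])):
            table.update().where(key == k).values({tmp_name: value}).execute()
        old.drop()
        table.c[tmp_name].alter(name=col_name)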
- -import sqlalchemy as sa - -from ceilometer.storage.sqlalchemy import models - -tables = [('trait_text', sa.String(255), True, 't_string', 1), - ('trait_int', sa.Integer, False, 't_int', 2), - ('trait_float', sa.Float(53), False, 't_float', 3), - ('trait_datetime', models.PreciseTimestamp(), - False, 't_datetime', 4)] - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - trait = sa.Table('trait', meta, autoload=True) - event = sa.Table('event', meta, autoload=True) - trait_type = sa.Table('trait_type', meta, autoload=True) - for t_name, t_type, t_nullable, col_name, __ in tables: - t_table = sa.Table( - t_name, meta, - sa.Column('event_id', sa.Integer, - sa.ForeignKey(event.c.id), primary_key=True), - sa.Column('key', sa.String(255), primary_key=True), - sa.Column('value', t_type, nullable=t_nullable), - sa.Index('ix_%s_event_id_key' % t_name, - 'event_id', 'key'), - mysql_engine='InnoDB', - mysql_charset='utf8', - ) - t_table.create() - query = sa.select( - [trait.c.event_id, - trait_type.c.desc, - trait.c[col_name]]).select_from( - trait.join(trait_type, - trait.c.trait_type_id == trait_type.c.id)).where( - trait.c[col_name] != sa.null()) - if query.alias().select().scalar() is not None: - t_table.insert().from_select( - ['event_id', 'key', 'value'], query).execute() - trait.drop() - trait_type.drop() diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py deleted file mode 100644 index 1e8b4614..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py +++ /dev/null @@ -1,21 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy as sa - - -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - event = sa.Table('event', meta, autoload=True) - raw = sa.Column('raw', sa.Text) - event.create_column(raw) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py deleted file mode 100644 index 03a5525b..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def upgrade(migrate_engine): - # NOTE(gordc): this is a noop script to handle bug1468916 - # previous lowering of id length will fail if db contains data longer. - # this skips migration for those failing. 
the next script will resize - # if this original migration passed. - pass diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py deleted file mode 100644 index a7db70cb..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table - - -def upgrade(migrate_engine): - meta = MetaData(bind=migrate_engine) - resource = Table('resource', meta, autoload=True) - resource.c.user_id.alter(type=String(255)) - resource.c.project_id.alter(type=String(255)) - resource.c.resource_id.alter(type=String(255)) - resource.c.source_id.alter(type=String(255)) - sample = Table('sample', meta, autoload=True) - sample.c.message_signature.alter(type=String(64)) - sample.c.message_id.alter(type=String(128)) - alarm = Table('alarm', meta, autoload=True) - alarm.c.alarm_id.alter(type=String(128)) - alarm.c.user_id.alter(type=String(255)) - alarm.c.project_id.alter(type=String(255)) - alarm_history = Table('alarm_history', meta, autoload=True) - alarm_history.c.alarm_id.alter(type=String(128)) - alarm_history.c.user_id.alter(type=String(255)) - alarm_history.c.project_id.alter(type=String(255)) - alarm_history.c.event_id.alter(type=String(128)) - alarm_history.c.on_behalf_of.alter(type=String(255)) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py deleted file mode 100644 index ac59595d..00000000 --- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
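[annotation] Script 041 above also shows the fast path for reshaping data: rather than iterating rows in Python, it moves each typed trait column with a single INSERT ... FROM SELECT executed server-side. The idiom in isolation (a sketch; table and column names are illustrative):

    import sqlalchemy as sa


    def move_rows(src, dst, columns, condition):
        # one server-side statement; no per-row round trips
        query = sa.select([src.c[name] for name in columns]).where(condition)
        dst.insert().from_select(columns, query).execute()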
- -import sqlalchemy as sa - - -# Add index on metadata_hash column of resource -def upgrade(migrate_engine): - meta = sa.MetaData(bind=migrate_engine) - resource = sa.Table('resource', meta, autoload=True) - index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash) - index.create(bind=migrate_engine) diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/storage/sqlalchemy/migration.py b/ceilometer/storage/sqlalchemy/migration.py deleted file mode 100644 index 160e68e5..00000000 --- a/ceilometer/storage/sqlalchemy/migration.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -def paged(query, size=1000): - """Page query results - - :param query: the SQLAlchemy query to execute - :param size: the max page size - return: generator with query data - """ - offset = 0 - while True: - page = query.offset(offset).limit(size).execute() - if page.rowcount <= 0: - # There are no more rows - break - for row in page: - yield row - offset += size diff --git a/ceilometer/storage/sqlalchemy/models.py b/ceilometer/storage/sqlalchemy/models.py index 223ef0f5..c726e7bf 100644 --- a/ceilometer/storage/sqlalchemy/models.py +++ b/ceilometer/storage/sqlalchemy/models.py @@ -13,15 +13,11 @@ """ SQLAlchemy models for Ceilometer data. 
""" -import hashlib import json -from oslo_utils import timeutils import six -from sqlalchemy import (Column, Integer, String, ForeignKey, Index, - UniqueConstraint, BigInteger) -from sqlalchemy import event -from sqlalchemy import Float, Boolean, Text, DateTime +from sqlalchemy import Column, Integer, String, ForeignKey, Index +from sqlalchemy import Float, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import deferred @@ -99,148 +95,6 @@ class CeilometerBase(object): Base = declarative_base(cls=CeilometerBase) -class MetaText(Base): - """Metering text metadata.""" - - __tablename__ = 'metadata_text' - __table_args__ = ( - Index('ix_meta_text_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Text) - - -class MetaBool(Base): - """Metering boolean metadata.""" - - __tablename__ = 'metadata_bool' - __table_args__ = ( - Index('ix_meta_bool_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Boolean) - - -class MetaBigInt(Base): - """Metering integer metadata.""" - - __tablename__ = 'metadata_int' - __table_args__ = ( - Index('ix_meta_int_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(BigInteger, default=False) - - -class MetaFloat(Base): - """Metering float metadata.""" - - __tablename__ = 'metadata_float' - __table_args__ = ( - Index('ix_meta_float_key', 'meta_key'), - ) - id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True) - meta_key = Column(String(255), primary_key=True) - value = Column(Float(53), default=False) - - -class Meter(Base): - """Meter definition data.""" - - __tablename__ = 'meter' - __table_args__ = ( - UniqueConstraint('name', 'type', 'unit', name='def_unique'), - Index('ix_meter_name', 'name'), - ) - id = Column(Integer, primary_key=True) - name = Column(String(255), nullable=False) - type = Column(String(255)) - unit = Column(String(255)) - samples = relationship("Sample", backref="meter") - - -class Resource(Base): - """Resource data.""" - - __tablename__ = 'resource' - __table_args__ = ( - # TODO(gordc): this should exist but the attribute values we set - # for user/project/source/resource id's are too large - # for an uuid. 
- # UniqueConstraint('resource_id', 'user_id', 'project_id', - # 'source_id', 'metadata_hash', - # name='res_def_unique'), - Index('ix_resource_resource_id', 'resource_id'), - Index('ix_resource_metadata_hash', 'metadata_hash'), - ) - - internal_id = Column(Integer, primary_key=True) - user_id = Column(String(255)) - project_id = Column(String(255)) - source_id = Column(String(255)) - resource_id = Column(String(255), nullable=False) - resource_metadata = deferred(Column(JSONEncodedDict())) - metadata_hash = deferred(Column(String(32))) - samples = relationship("Sample", backref="resource") - meta_text = relationship("MetaText", backref="resource", - cascade="all, delete-orphan") - meta_float = relationship("MetaFloat", backref="resource", - cascade="all, delete-orphan") - meta_int = relationship("MetaBigInt", backref="resource", - cascade="all, delete-orphan") - meta_bool = relationship("MetaBool", backref="resource", - cascade="all, delete-orphan") - - -@event.listens_for(Resource, "before_insert") -def before_insert(mapper, connection, target): - metadata = json.dumps(target.resource_metadata, sort_keys=True) - target.metadata_hash = hashlib.md5(metadata).hexdigest() - - -class Sample(Base): - """Metering data.""" - - __tablename__ = 'sample' - __table_args__ = ( - Index('ix_sample_timestamp', 'timestamp'), - Index('ix_sample_resource_id', 'resource_id'), - Index('ix_sample_meter_id', 'meter_id'), - Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id') - ) - id = Column(Integer, primary_key=True) - meter_id = Column(Integer, ForeignKey('meter.id')) - resource_id = Column(Integer, ForeignKey('resource.internal_id')) - volume = Column(Float(53)) - timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow()) - recorded_at = Column(PreciseTimestamp(), - default=lambda: timeutils.utcnow()) - message_signature = Column(String(64)) - message_id = Column(String(128)) - - -class FullSample(object): - """A fake model for query samples.""" - id = Sample.id - timestamp = Sample.timestamp - message_id = Sample.message_id - message_signature = Sample.message_signature - recorded_at = Sample.recorded_at - counter_name = Meter.name - counter_type = Meter.type - counter_unit = Meter.unit - counter_volume = Sample.volume - resource_id = Resource.resource_id - source_id = Resource.source_id - user_id = Resource.user_id - project_id = Resource.project_id - resource_metadata = Resource.resource_metadata - internal_id = Resource.internal_id - - class EventType(Base): """Types of event records.""" __tablename__ = 'event_type' diff --git a/ceilometer/storage/sqlalchemy/utils.py b/ceilometer/storage/sqlalchemy/utils.py deleted file mode 100644 index 2003c24c..00000000 --- a/ceilometer/storage/sqlalchemy/utils.py +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
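[annotation] The before_insert listener above keeps Resource.metadata_hash consistent by hashing a canonical JSON rendering of the metadata; sort_keys makes the digest independent of dict ordering, so identical metadata always dedupes to a single resource row. The same idea in isolation (md5 serves as a fingerprint here, not a security primitive; the .encode() call is an addition for Python 3):

    import hashlib
    import json


    def metadata_hash(resource_metadata):
        canonical = json.dumps(resource_metadata, sort_keys=True)
        return hashlib.md5(canonical.encode('utf-8')).hexdigest()


    assert metadata_hash({'a': 1, 'b': 2}) == metadata_hash({'b': 2, 'a': 1})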
-# - -import operator - -import six -from sqlalchemy import and_ -from sqlalchemy import asc -from sqlalchemy import desc -from sqlalchemy import not_ -from sqlalchemy import or_ -from sqlalchemy.orm import aliased - -import ceilometer -from ceilometer.storage.sqlalchemy import models - - -META_TYPE_MAP = {bool: models.MetaBool, - str: models.MetaText, - six.text_type: models.MetaText, - type(None): models.MetaText, - int: models.MetaBigInt, - float: models.MetaFloat} -if six.PY2: - META_TYPE_MAP[long] = models.MetaBigInt - - -class QueryTransformer(object): - operators = {"=": operator.eq, - "<": operator.lt, - ">": operator.gt, - "<=": operator.le, - "=<": operator.le, - ">=": operator.ge, - "=>": operator.ge, - "!=": operator.ne, - "in": lambda field_name, values: field_name.in_(values), - "=~": lambda field, value: field.op("regexp")(value)} - - # operators which are different for different dialects - dialect_operators = {'postgresql': {'=~': (lambda field, value: - field.op("~")(value))}} - - complex_operators = {"or": or_, - "and": and_, - "not": not_} - - ordering_functions = {"asc": asc, - "desc": desc} - - def __init__(self, table, query, dialect='mysql'): - self.table = table - self.query = query - self.dialect_name = dialect - - def _get_operator(self, op): - return (self.dialect_operators.get(self.dialect_name, {}).get(op) - or self.operators[op]) - - def _handle_complex_op(self, complex_op, nodes): - op = self.complex_operators[complex_op] - if op == not_: - nodes = [nodes] - element_list = [] - for node in nodes: - element = self._transform(node) - element_list.append(element) - return op(*element_list) - - def _handle_simple_op(self, simple_op, nodes): - op = self._get_operator(simple_op) - field_name, value = list(nodes.items())[0] - if field_name.startswith('resource_metadata.'): - return self._handle_metadata(op, field_name, value) - else: - return op(getattr(self.table, field_name), value) - - def _handle_metadata(self, op, field_name, value): - if op == self.operators["in"]: - raise ceilometer.NotImplementedError('Metadata query with in ' - 'operator is not implemented') - field_name = field_name[len('resource_metadata.'):] - meta_table = META_TYPE_MAP[type(value)] - meta_alias = aliased(meta_table) - on_clause = and_(self.table.internal_id == meta_alias.id, - meta_alias.meta_key == field_name) - # outer join is needed to support metaquery - # with or operator on non existent metadata field - # see: test_query_non_existing_metadata_with_result - # test case. 
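[annotation] _handle_metadata above is the subtle part of the deleted QueryTransformer: each resource_metadata.<key> predicate selects the value table by the Python type of the operand (META_TYPE_MAP), aliases it so several predicates can join independently, and outer joins so an OR over a missing key still yields rows. The dispatch step, condensed (a sketch; model names are the ones deleted from models.py):

    from sqlalchemy import and_
    from sqlalchemy.orm import aliased


    def metadata_condition(query, table, op, field_name, value, type_map):
        key = field_name[len('resource_metadata.'):]
        meta_alias = aliased(type_map[type(value)])   # e.g. MetaFloat for 0.5
        on_clause = and_(table.internal_id == meta_alias.id,
                         meta_alias.meta_key == key)
        # LEFT OUTER JOIN keeps rows without this key alive for OR branches
        return query.outerjoin(meta_alias, on_clause), op(meta_alias.value, value)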
- self.query = self.query.outerjoin(meta_alias, on_clause) - return op(meta_alias.value, value) - - def _transform(self, sub_tree): - operator, nodes = list(sub_tree.items())[0] - if operator in self.complex_operators: - return self._handle_complex_op(operator, nodes) - else: - return self._handle_simple_op(operator, nodes) - - def apply_filter(self, expression_tree): - condition = self._transform(expression_tree) - self.query = self.query.filter(condition) - - def apply_options(self, orderby, limit): - self._apply_order_by(orderby) - if limit is not None: - self.query = self.query.limit(limit) - - def _apply_order_by(self, orderby): - if orderby is not None: - for field in orderby: - attr, order = list(field.items())[0] - ordering_function = self.ordering_functions[order] - self.query = self.query.order_by(ordering_function( - getattr(self.table, attr))) - else: - self.query = self.query.order_by(desc(self.table.timestamp)) - - def get_query(self): - return self.query diff --git a/ceilometer/telemetry/__init__.py b/ceilometer/telemetry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/telemetry/notifications.py b/ceilometer/telemetry/notifications.py deleted file mode 100644 index db825ac0..00000000 --- a/ceilometer/telemetry/notifications.py +++ /dev/null @@ -1,66 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg -import oslo_messaging - -from ceilometer.agent import plugin_base -from ceilometer import sample - -OPTS = [ - cfg.StrOpt('ceilometer_control_exchange', - default='ceilometer', - help="Exchange name for ceilometer notifications."), -] - - -cfg.CONF.register_opts(OPTS) - - -class TelemetryBase(plugin_base.NotificationBase): - """Convert telemetry notification into Samples.""" - - def get_targets(self, conf): - """Return a sequence of oslo_messaging.Target - - Sequence defining the exchange and topics to be connected for this - plugin. - """ - return [oslo_messaging.Target( - topic=topic, exchange=conf.ceilometer_control_exchange) - for topic in self.get_notification_topics(conf)] - - -class TelemetryIpc(TelemetryBase): - """Handle sample from notification bus - - Telemetry samples can be posted via API or polled by Polling agent. 
- """ - - event_types = ['telemetry.api', 'telemetry.polling'] - - def process_notification(self, message): - samples = message['payload']['samples'] - for sample_dict in samples: - yield sample.Sample( - name=sample_dict['counter_name'], - type=sample_dict['counter_type'], - unit=sample_dict['counter_unit'], - volume=sample_dict['counter_volume'], - user_id=sample_dict['user_id'], - project_id=sample_dict['project_id'], - resource_id=sample_dict['resource_id'], - timestamp=sample_dict['timestamp'], - resource_metadata=sample_dict['resource_metadata'], - source=sample_dict['source'], - id=sample_dict['message_id']) diff --git a/ceilometer/tests/base.py b/ceilometer/tests/base.py index ca6b0071..2156e003 100644 --- a/ceilometer/tests/base.py +++ b/ceilometer/tests/base.py @@ -16,33 +16,16 @@ import functools import os.path -import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base -from oslotest import mockpatch import six from testtools import testcase import webtest import ceilometer -from ceilometer import messaging class BaseTestCase(base.BaseTestCase): - def setup_messaging(self, conf, exchange=None): - self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) - conf.set_override("notification_driver", "messaging") - if not exchange: - exchange = 'ceilometer' - conf.set_override("control_exchange", exchange) - - # NOTE(sileht): Ensure a new oslo.messaging driver is loaded - # between each tests - self.transport = messaging.get_transport("fake://", cache=False) - self.useFixture(mockpatch.Patch( - 'ceilometer.messaging.get_transport', - return_value=self.transport)) - def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. diff --git a/ceilometer/tests/db.py b/ceilometer/tests/db.py index d8c74f78..38542489 100644 --- a/ceilometer/tests/db.py +++ b/ceilometer/tests/db.py @@ -48,10 +48,8 @@ class MongoDbManager(fixtures.Fixture): action='ignore', message='.*you must provide a username and password.*') try: - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) except storage.StorageBadVersion as e: raise testcase.TestSkipped(six.text_type(e)) @@ -77,10 +75,7 @@ class SQLManager(fixtures.Fixture): def setUp(self): super(SQLManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') - self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.event_connection = storage.get_connection(self.url) class PgSQLManager(SQLManager): @@ -103,10 +98,8 @@ class ElasticSearchManager(fixtures.Fixture): def setUp(self): super(ElasticSearchManager, self).setUp() - self.connection = storage.get_connection( - 'sqlite://', 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) # prefix each test with unique index name self.event_connection.index_name = 'events_%s' % uuid.uuid4().hex # force index on write so data is queryable right away @@ -119,10 +112,8 @@ class HBaseManager(fixtures.Fixture): def setUp(self): super(HBaseManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table 
data_prefix = str(uuid.uuid4().hex) @@ -159,10 +150,8 @@ class SQLiteManager(fixtures.Fixture): def setUp(self): super(SQLiteManager, self).setUp() - self.connection = storage.get_connection( - self.url, 'ceilometer.metering.storage') self.event_connection = storage.get_connection( - self.url, 'ceilometer.event.storage') + self.url) @six.add_metaclass(test_base.SkipNotImplementedMeta) @@ -206,35 +195,19 @@ class TestBase(test_base.BaseTestCase): self.useFixture(self.db_manager) - self.conn = self.db_manager.connection - self.conn.upgrade() - self.event_conn = self.db_manager.event_connection self.event_conn.upgrade() self.useFixture(mockpatch.Patch('ceilometer.storage.get_connection', side_effect=self._get_connection)) - # Set a default location for the pipeline config file so the - # tests work even if ceilometer is not installed globally on - # the system. - self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline') - self.CONF.set_override( - 'pipeline_cfg_file', - self.path_get('etc/ceilometer/pipeline.yaml') - ) - def tearDown(self): self.event_conn.clear() self.event_conn = None - self.conn.clear() - self.conn = None super(TestBase, self).tearDown() - def _get_connection(self, url, namespace): - if namespace == "ceilometer.event.storage": - return self.event_conn - return self.conn + def _get_connection(self, url): + return self.event_conn def run_with(*drivers): diff --git a/ceilometer/tests/functional/api/__init__.py b/ceilometer/tests/functional/api/__init__.py index 6dde5fae..aa111e54 100644 --- a/ceilometer/tests/functional/api/__init__.py +++ b/ceilometer/tests/functional/api/__init__.py @@ -41,7 +41,6 @@ class FunctionalTest(db_test_base.TestBase): def setUp(self): super(FunctionalTest, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) opts.set_defaults(self.CONF) self.CONF.set_override("auth_version", "v2.0", @@ -50,9 +49,6 @@ class FunctionalTest(db_test_base.TestBase): self.path_get('etc/ceilometer/policy.json'), group='oslo_policy') - self.CONF.set_override('gnocchi_is_enabled', False, group='api') - self.CONF.set_override('aodh_is_enabled', False, group='api') - self.app = self._make_app() def _make_app(self, enable_acl=False): diff --git a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py b/ceilometer/tests/functional/api/v2/test_acl_scenarios.py index f30e090d..4c76c73f 100644 --- a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py +++ b/ceilometer/tests/functional/api/v2/test_acl_scenarios.py @@ -25,8 +25,6 @@ import webtest from ceilometer.api import app from ceilometer.event.storage import models as ev_model -from ceilometer.publisher import utils -from ceilometer import sample from ceilometer.tests.functional.api import v2 VALID_TOKEN = uuid.uuid4().hex @@ -57,35 +55,6 @@ class TestAPIACL(v2.FunctionalTest): user_id='user_id1', is_v2=True) - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-good', - 'project-good', - 'resource-good', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample'}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-fred', - 'project-good', - 'resource-56', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4'}, - source='test_source')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) 
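[annotation] The deleted TestAPIACL.setUp above seeded the metering store by hand: build a sample.Sample, wrap it with publisher.utils.meter_message_from_counter(), which signs the payload with the configured telemetry secret, then record it. The skeleton of that pattern (field values illustrative; conn is a metering storage connection):

    import datetime

    from ceilometer.publisher import utils
    from ceilometer import sample

    cnt = sample.Sample(
        'meter.test', 'cumulative', '', 1,            # name, type, unit, volume
        'user-good', 'project-good', 'resource-good',
        timestamp=datetime.datetime(2012, 7, 2, 10, 40),
        resource_metadata={'tag': 'self.sample'},
        source='test_source')
    msg = utils.meter_message_from_counter(cnt, 'not-so-secret')  # signed dict
    # conn.record_metering_data(msg)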
- def get_json(self, path, expect_errors=False, headers=None, q=None, **params): return super(TestAPIACL, self).get_json(path, @@ -99,90 +68,6 @@ class TestAPIACL(v2.FunctionalTest): self.CONF.set_override("api_paste_config", file_name) return webtest.TestApp(app.load_app()) - def test_non_authenticated(self): - response = self.get_json('/meters', expect_errors=True) - self.assertEqual(401, response.status_int) - - def test_authenticated_wrong_role(self): - response = self.get_json('/meters', - expect_errors=True, - headers={ - "X-Roles": "Member", - "X-Tenant-Name": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - self.assertEqual(401, response.status_int) - - # FIXME(dhellmann): This test is not properly looking at the tenant - # info. We do not correctly detect the improper tenant. That's - # really something the keystone middleware would have to do using - # the incoming token, which we aren't providing. - # - # def test_authenticated_wrong_tenant(self): - # response = self.get_json('/meters', - # expect_errors=True, - # headers={ - # "X-Roles": "admin", - # "X-Tenant-Name": "achoo", - # "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", - # }) - # self.assertEqual(401, response.status_int) - - def test_authenticated(self): - data = self.get_json('/meters', - headers={"X-Auth-Token": VALID_TOKEN, - "X-Roles": "admin", - "X-Project-Id": - "bc23a9d531064583ace8f67dad60f6bb", - }) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin_missing_project_query(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_with_non_admin(self): - data = self.get_json('/meters', - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }]) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-good', 'resource-56']), ids) - - def test_non_admin_wrong_project(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-wrong', - }]) - self.assertEqual(401, data.status_int) - - def test_non_admin_two_projects(self): - data = self.get_json('/meters', - expect_errors=True, - headers={"X-Roles": "Member", - "X-Auth-Token": VALID_TOKEN2, - "X-Project-Id": "project-good"}, - q=[{'field': 'project_id', - 'value': 'project-good', - }, - {'field': 'project_id', - 'value': 'project-naughty', - }]) - self.assertEqual(401, data.status_int) - class TestAPIEventACL(TestAPIACL): diff --git a/ceilometer/tests/functional/api/v2/test_api_upgrade.py b/ceilometer/tests/functional/api/v2/test_api_upgrade.py deleted file mode 100644 index 7e4b427f..00000000 --- a/ceilometer/tests/functional/api/v2/test_api_upgrade.py +++ /dev/null @@ -1,148 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
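[annotation] The ACL tests removed above encode the /v2/meters authorization contract: missing or wrong-role credentials yield 401, and a Member may only filter on its own project_id (exactly one project, and only the right one). The core assertion pattern, as in the deleted tests (VALID_TOKEN2 was a module-level fixture in the deleted file):

    def test_non_admin_wrong_project(self):
        data = self.get_json('/meters',
                             expect_errors=True,
                             headers={'X-Roles': 'Member',
                                      'X-Auth-Token': VALID_TOKEN2,
                                      'X-Project-Id': 'project-good'},
                             q=[{'field': 'project_id',
                                 'value': 'project-wrong'}])
        self.assertEqual(401, data.status_int)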
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils import fileutils -from oslotest import mockpatch -import six - -from ceilometer.tests.functional.api import v2 - - -class TestAPIUpgradePath(v2.FunctionalTest): - def _make_app(self): - content = ('{"default": ""}') - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='policy', - suffix='.json') - self.CONF.set_override("policy_file", self.tempfile, - group='oslo_policy') - return super(TestAPIUpgradePath, self)._make_app() - - def _setup_osloconfig_options(self): - self.CONF.set_override('gnocchi_is_enabled', True, group='api') - self.CONF.set_override('aodh_is_enabled', True, group='api') - self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/', - group='api') - - def _setup_keystone_mock(self): - self.CONF.set_override('gnocchi_is_enabled', None, group='api') - self.CONF.set_override('aodh_is_enabled', None, group='api') - self.CONF.set_override('aodh_url', None, group='api') - self.CONF.set_override('meter_dispatchers', ['database']) - self.ks = mock.Mock() - self.catalog = (self.ks.session.auth.get_access. - return_value.service_catalog) - self.catalog.url_for.side_effect = self._url_for - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', return_value=self.ks)) - - @staticmethod - def _url_for(service_type=None): - if service_type == 'metric': - return 'http://gnocchi/' - elif service_type == 'alarming': - return 'http://alarm-endpoint:8008/' - - def _do_test_gnocchi_enabled_without_database_backend(self): - self.CONF.set_override('meter_dispatchers', 'gnocchi') - for endpoint in ['meters', 'samples', 'resources']: - response = self.app.get(self.PATH_PREFIX + '/' + endpoint, - status=410) - self.assertIn(b'Gnocchi API', response.body) - - headers_events = {"X-Roles": "admin", - "X-User-Id": "user1", - "X-Project-Id": "project1"} - for endpoint in ['events', 'event_types']: - self.app.get(self.PATH_PREFIX + '/' + endpoint, - headers=headers_events, - status=200) - - response = self.post_json('/query/samples', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=410) - self.assertIn(b'Gnocchi API', response.body) - sample_params = { - "counter_type": "gauge", - "counter_name": "fake_counter", - "resource_id": "fake_resource_id", - "counter_unit": "fake_unit", - "counter_volume": "1" - } - self.post_json('/meters/fake_counter', - params=[sample_params], - status=201) - response = self.post_json('/meters/fake_counter?direct=1', - params=[sample_params], - status=400) - self.assertIn(b'direct option cannot be true when Gnocchi is enabled', - response.body) - - def _do_test_alarm_redirect(self): - response = self.app.get(self.PATH_PREFIX + '/alarms', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms", - response.headers['Location']) - - response = self.app.get(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - 
self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid', - expect_errors=True) - - self.assertEqual(307, response.status_code) - self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid", - response.headers['Location']) - - response = self.post_json('/query/alarms', - params={ - "filter": '{"=": {"type": "creation"}}', - "orderby": '[{"timestamp": "DESC"}]', - "limit": 3 - }, status=307) - self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms", - response.headers['Location']) - - def test_gnocchi_enabled_without_database_backend_keystone(self): - self._setup_keystone_mock() - self._do_test_gnocchi_enabled_without_database_backend() - self.catalog.url_for.assert_has_calls([ - mock.call(service_type="alarming"), - mock.call(service_type="metric")], - any_order=True) - - def test_gnocchi_enabled_without_database_backend_configoptions(self): - self._setup_osloconfig_options() - self._do_test_gnocchi_enabled_without_database_backend() - - def test_alarm_redirect_keystone(self): - self._setup_keystone_mock() - self._do_test_alarm_redirect() - self.assertEqual([mock.call(service_type="alarming")], - self.catalog.url_for.mock_calls) - - def test_alarm_redirect_configoptions(self): - self._setup_osloconfig_options() - self._do_test_alarm_redirect() diff --git a/ceilometer/tests/functional/api/v2/test_app.py b/ceilometer/tests/functional/api/v2/test_app.py index 9aef1612..55943005 100644 --- a/ceilometer/tests/functional/api/v2/test_app.py +++ b/ceilometer/tests/functional/api/v2/test_app.py @@ -18,14 +18,6 @@ from ceilometer.tests.functional.api import v2 -class TestPecanApp(v2.FunctionalTest): - - def test_pecan_extension_guessing_unset(self): - # check Pecan does not assume .jpg is an extension - response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg') - self.assertEqual('application/json', response.content_type) - - class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' diff --git a/ceilometer/tests/functional/api/v2/test_capabilities.py b/ceilometer/tests/functional/api/v2/test_capabilities.py index f3c880ef..774eda52 100644 --- a/ceilometer/tests/functional/api/v2/test_capabilities.py +++ b/ceilometer/tests/functional/api/v2/test_capabilities.py @@ -29,4 +29,4 @@ class TestCapabilitiesController(tests_api.FunctionalTest): self.assertIsNotNone(data) self.assertNotEqual({}, data) self.assertIn('api', data) - self.assertIn('storage', data) + self.assertIn('event_storage', data) diff --git a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py b/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py deleted file mode 100644 index b9646ab5..00000000 --- a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py +++ /dev/null @@ -1,314 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-"""Tests complex queries for samples -""" - -import datetime - -from oslo_utils import timeutils - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 as tests_api - - -admin_header = {"X-Roles": "admin", - "X-Project-Id": - "project-id1"} -non_admin_header = {"X-Roles": "Member", - "X-Project-Id": - "project-id1"} - - -class TestQueryMetersController(tests_api.FunctionalTest): - def setUp(self): - super(TestQueryMetersController, self).setUp() - self.url = '/query/samples' - - for cnt in [ - sample.Sample('meter.test', - 'cumulative', - '', - 1, - 'user-id1', - 'project-id1', - 'resource-id1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server1', - 'tag': 'self.sample', - 'size': 456, - 'util': 0.25, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 2, - 'user-id2', - 'project-id2', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server2', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), - sample.Sample('meter.test', - 'cumulative', - '', - 3, - 'user-id3', - 'project-id3', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server3', - 'tag': 'self.sample', - 'size': 789, - 'util': 0.95, - 'is_public': True}, - source='test_source')]: - - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.conn.record_metering_data(msg) - - def test_query_fields_are_optional(self): - data = self.post_json(self.url, params={}) - self.assertEqual(3, len(data.json)) - - def test_query_with_isotime(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - - data = self.post_json(self.url, - params={"filter": - '{">=": {"timestamp": "' - + isotime + '"}}'}) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - result_time = timeutils.parse_isotime(sample_item['timestamp']) - result_time = result_time.replace(tzinfo=None) - self.assertTrue(result_time >= date_time) - - def test_non_admin_tenant_sees_only_its_own_project(self): - data = self.post_json(self.url, - params={}, - headers=non_admin_header) - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_non_admin_tenant_cannot_query_others_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - expect_errors=True, - headers=non_admin_header) - - self.assertEqual(401, data.status_int) - self.assertIn(b"Not Authorized to access project project-id2", - data.body) - - def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id1"}}'}, - headers=non_admin_header) - - for sample_item in data.json: - self.assertEqual("project-id1", sample_item['project_id']) - - def test_admin_tenant_sees_every_project(self): - data = self.post_json(self.url, - params={}, - headers=admin_header) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2", "project-id3"])) - - def test_admin_tenant_sees_every_project_with_complex_filter(self): - filter = ('{"OR": ' + - '[{"=": {"project_id": "project-id1"}}, ' + - '{"=": {"project_id": "project-id2"}}]}') - 
data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_sees_every_project_with_in_filter(self): - filter = ('{"In": ' + - '{"project_id": ["project-id1", "project-id2"]}}') - data = self.post_json(self.url, - params={"filter": filter}, - headers=admin_header) - - self.assertEqual(2, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], - (["project-id1", "project-id2"])) - - def test_admin_tenant_can_query_any_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project_id": "project-id2"}}'}, - headers=admin_header) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DESC"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_field_name_project(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"project": "project-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['project_id'], set(["project-id2"])) - - def test_query_with_field_name_resource(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"resource": "resource-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['resource_id'], set(["resource-id2"])) - - def test_query_with_wrong_field_name(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"unknown": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"is not valid under any of the given schemas", - data.body) - - def test_query_with_wrong_json(self): - data = self.post_json(self.url, - params={"filter": - '{"=": "resource": "resource-id2"}}'}, - expect_errors=True) - - self.assertEqual(400, data.status_int) - self.assertIn(b"Filter expression not valid", data.body) - - def test_query_with_field_name_user(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"user": "user-id2"}}'}) - - self.assertEqual(1, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['user_id'], set(["user-id2"])) - - def test_query_with_field_name_meter(self): - data = self.post_json(self.url, - params={"filter": - '{"=": {"meter": "meter.test"}}'}) - - self.assertEqual(3, len(data.json)) - for sample_item in data.json: - self.assertIn(sample_item['meter'], set(["meter.test"])) - - def test_query_with_lower_and_upper_case_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"project_id": "DeSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["project-id3", "project-id2", "project-id1"], - [s["project_id"] for s in data.json]) - - def test_query_with_user_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"user": "aSc"}]'}) - - self.assertEqual(3, len(data.json)) - self.assertEqual(["user-id1", "user-id2", "user-id3"], - [s["user_id"] for s in data.json]) - - def test_query_with_volume_field_name_orderby(self): - data = self.post_json(self.url, - params={"orderby": '[{"volume": "deSc"}]'}) - - 
self.assertEqual(3, len(data.json))
-        self.assertEqual([3, 2, 1],
-                         [s["volume"] for s in data.json])
-
-    def test_query_with_missing_order_in_orderby(self):
-        data = self.post_json(self.url,
-                              params={"orderby": '[{"project_id": ""}]'},
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body)
-
-    def test_query_with_wrong_json_in_orderby(self):
-        data = self.post_json(self.url,
-                              params={"orderby": '{"project_id": "desc"}]'},
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertIn(b"Order-by expression not valid: Extra data", data.body)
-
-    def test_filter_with_metadata(self):
-        data = self.post_json(self.url,
-                              params={"filter":
-                                      '{">=": {"metadata.util": 0.5}}'})
-
-        self.assertEqual(2, len(data.json))
-        for sample_item in data.json:
-            self.assertTrue(float(sample_item["metadata"]["util"]) >= 0.5)
-
-    def test_filter_with_negation(self):
-        filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}'
-        data = self.post_json(self.url,
-                              params={"filter": filter_expr})
-
-        self.assertEqual(1, len(data.json))
-        for sample_item in data.json:
-            self.assertTrue(float(sample_item["metadata"]["util"]) < 0.5)
-
-    def test_limit_must_be_positive(self):
-        data = self.post_json(self.url,
-                              params={"limit": 0},
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertIn(b"Limit must be positive", data.body)
-
-    def test_default_limit(self):
-        self.CONF.set_override('default_api_return_limit', 1, group='api')
-        data = self.post_json(self.url, params={})
-        self.assertEqual(1, len(data.json))
diff --git a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py b/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
deleted file mode 100644
index fb633035..00000000
--- a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test computing duration by resource.
-"""
-
-import datetime
-
-import mock
-from oslo_utils import timeutils
-
-from ceilometer.storage import models
-from ceilometer.tests.functional.api import v2
-
-
-class TestComputeDurationByResource(v2.FunctionalTest):
-
-    def setUp(self):
-        super(TestComputeDurationByResource, self).setUp()
-        # Create samples relative to the range and pretend
-        # that the intervening samples exist.
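-        # Timeline of the fixture timestamps:
-        #   early1/early2   fall before the queried window,
-        #   start .. end    bound the window queried via _invoke_api(),
-        #   middle1/middle2 fall inside the window,
-        #   late1/late2     fall after it.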
- - self.early1 = datetime.datetime(2012, 8, 27, 7, 0) - self.early2 = datetime.datetime(2012, 8, 27, 17, 0) - - self.start = datetime.datetime(2012, 8, 28, 0, 0) - - self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) - self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) - - self.end = datetime.datetime(2012, 8, 28, 23, 59) - - self.late1 = datetime.datetime(2012, 8, 29, 9, 0) - self.late2 = datetime.datetime(2012, 8, 29, 19, 0) - - def _patch_get_interval(self, start, end): - def get_interval(sample_filter, period, groupby, aggregate): - self.assertIsNotNone(sample_filter.start_timestamp) - self.assertIsNotNone(sample_filter.end_timestamp) - if (sample_filter.start_timestamp > end or - sample_filter.end_timestamp < start): - return [] - duration_start = max(sample_filter.start_timestamp, start) - duration_end = min(sample_filter.end_timestamp, end) - duration = timeutils.delta_seconds(duration_start, duration_end) - return [ - models.Statistics( - unit='', - min=0, - max=0, - avg=0, - sum=0, - count=0, - period=None, - period_start=None, - period_end=None, - duration=duration, - duration_start=duration_start, - duration_end=duration_end, - groupby=None, - ) - ] - return mock.patch.object(type(self.conn), 'get_meter_statistics', - side_effect=get_interval) - - def _invoke_api(self): - return self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.start.isoformat()}, - {'field': 'timestamp', - 'op': 'le', - 'value': self.end.isoformat()}, - {'field': 'search_offset', - 'value': 10}]) - - def test_before_range(self): - with self._patch_get_interval(self.early1, self.early2): - data = self._invoke_api() - self.assertEqual([], data) - - def _assert_times_match(self, actual, expected): - if actual: - actual = timeutils.parse_isotime(actual) - actual = actual.replace(tzinfo=None) - self.assertEqual(expected, actual) - - def test_overlap_range_start(self): - with self._patch_get_interval(self.early1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.start) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(8 * 60 * 60, data[0]['duration']) - - def test_within_range(self): - with self._patch_get_interval(self.middle1, self.middle2): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle2) - self.assertEqual(10 * 60 * 60, data[0]['duration']) - - def test_within_range_zero_duration(self): - with self._patch_get_interval(self.middle1, self.middle1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle1) - self._assert_times_match(data[0]['duration_end'], self.middle1) - self.assertEqual(0, data[0]['duration']) - - def test_overlap_range_end(self): - with self._patch_get_interval(self.middle2, self.late1): - data = self._invoke_api() - self._assert_times_match(data[0]['duration_start'], self.middle2) - self._assert_times_match(data[0]['duration_end'], self.end) - self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration']) - - def test_after_range(self): - with self._patch_get_interval(self.late1, self.late2): - data = self._invoke_api() - self.assertEqual([], data) - - def test_without_end_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.late1, - duration_end=self.late2, - sum=0, - period=None, - period_start=None, - 
period_end=None, - groupby=None, - ) - ] - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': self.late1.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.late1) - self._assert_times_match(data[0]['duration_end'], self.late2) - - def test_without_start_timestamp(self): - statistics = [ - models.Statistics( - unit=None, - count=0, - min=None, - max=None, - avg=None, - duration=None, - duration_start=self.early1, - duration_end=self.early2, - sum=0, - period=None, - period_start=None, - period_end=None, - groupby=None, - ) - ] - - with mock.patch.object(type(self.conn), 'get_meter_statistics', - return_value=statistics): - data = self.get_json('/meters/instance/statistics', - q=[{'field': 'timestamp', - 'op': 'le', - 'value': self.early2.isoformat()}, - {'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'search_offset', - 'value': 10}]) - self._assert_times_match(data[0]['duration_start'], self.early1) - self._assert_times_match(data[0]['duration_end'], self.early2) diff --git a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py b/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py deleted file mode 100644 index 6ed3bdd9..00000000 --- a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py +++ /dev/null @@ -1,797 +0,0 @@ -# -# Copyright 2012 Red Hat, Inc. -# Copyright 2013 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing meters. 
-""" - -import base64 -import datetime - -from oslo_serialization import jsonutils -import six -import webtest.app - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 - - -class TestListEmptyMeters(v2.FunctionalTest): - - def test_empty(self): - data = self.get_json('/meters') - self.assertEqual([], data) - - -class TestValidateUserInput(v2.FunctionalTest): - - def test_list_meters_query_float_metadata(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '0.7.5', - 'type': 'float'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': 'abacaba', - 'type': 'boolean'}]) - self.assertRaises(webtest.app.AppError, self.get_json, - '/meters/meter.test', - q=[{'field': 'metadata.util', - 'op': 'eq', - 'value': '45.765', - 'type': 'integer'}]) - - -class TestListMetersRestriction(v2.FunctionalTest): - - def setUp(self): - super(TestListMetersRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 3, group='api') - for x in range(5): - for i in range(5): - s = sample.Sample( - 'volume.size%s' % x, - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_meter_limit(self): - data = self.get_json('/meters?limit=1') - self.assertEqual(1, len(data)) - - def test_meter_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/meters?limit=-2') - - def test_meter_limit_bigger(self): - data = self.get_json('/meters?limit=42') - self.assertEqual(5, len(data)) - - def test_meter_default_limit(self): - data = self.get_json('/meters') - self.assertEqual(3, len(data)) - - def test_old_sample_limit(self): - data = self.get_json('/meters/volume.size0?limit=1') - self.assertEqual(1, len(data)) - - def test_old_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/meters/volume.size0?limit=-2') - - def test_old_sample_limit_bigger(self): - data = self.get_json('/meters/volume.size0?limit=42') - self.assertEqual(5, len(data)) - - def test_old_sample_default_limit(self): - data = self.get_json('/meters/volume.size0') - self.assertEqual(3, len(data)) - - def test_sample_limit(self): - data = self.get_json('/samples?limit=1') - self.assertEqual(1, len(data)) - - def test_sample_limit_negative(self): - self.assertRaises(webtest.app.AppError, - self.get_json, - '/samples?limit=-2') - - def test_sample_limit_bigger(self): - data = self.get_json('/samples?limit=42') - self.assertEqual(25, len(data)) - - def test_sample_default_limit(self): - data = self.get_json('/samples') - self.assertEqual(3, len(data)) - - -class TestListMeters(v2.FunctionalTest): - - def setUp(self): - super(TestListMeters, self).setUp() - self.messages = [] - for cnt in [ - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'size': 123, - 'util': 0.75, - 'is_public': True}, - source='test_source'), 
- sample.Sample( - 'meter.test', - 'cumulative', - '', - 3, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 11, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - 'size': 0, - 'util': 0.47, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id', - 'project-id', - 'resource-id2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - 'size': 456, - 'util': 0.64, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id3', - timestamp=datetime.datetime(2012, 7, 2, 10, 42), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - sample.Sample( - 'meter.test.new', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample3', - 'size': 0, - 'util': 0.75, - 'is_public': False}, - source='test_source'), - - sample.Sample( - 'meter.mine', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2012, 7, 2, 10, 43), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample4', - 'properties': { - 'prop_1': 'prop_value', - 'prop_2': {'sub_prop_1': - 'sub_prop_value'}, - 'prop.3': {'$sub_prop.2': - 'sub_prop_value2'} - }, - 'size': 0, - 'util': 0.58, - 'is_public': True}, - source='test_source1'), - sample.Sample( - u'meter.accent\xe9\u0437', - 'gauge', - '', - 1, - 'user-id4', - 'project-id2', - 'resource-id4', - timestamp=datetime.datetime(2014, 7, 2, 10, 43), - resource_metadata={}, - source='test_source1')]: - msg = utils.meter_message_from_counter( - cnt, self.CONF.publisher.telemetry_secret) - self.messages.append(msg) - self.conn.record_metering_data(msg) - - def test_list_meters(self): - data = self.get_json('/meters') - self.assertEqual(6, len(data)) - self.assertEqual(set(['resource-id', - 'resource-id2', - 'resource-id3', - 'resource-id4']), - set(r['resource_id'] for r in data)) - self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', - u'meter.accent\xe9\u0437']), - set(r['name'] for r in data)) - self.assertEqual(set(['test_source', 'test_source1']), - set(r['source'] for r in data)) - - def test_list_unique_meters(self): - data = self.get_json('/meters?unique=True') - self.assertEqual(4, len(data)) - self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new', - u'meter.accent\xe9\u0437']), - set(r['name'] for r in data)) - - def test_meters_query_with_timestamp(self): - date_time = datetime.datetime(2012, 7, 2, 10, 41) - isotime = date_time.isoformat() - resp = self.get_json('/meters', - q=[{'field': 'timestamp', - 'op': 'gt', - 'value': isotime}], - expect_errors=True) - self.assertEqual(400, resp.status_code) - self.assertEqual('Unknown argument: "timestamp": ' - 'not valid for this resource', - jsonutils.loads(resp.body)['error_message'] - ['faultstring']) - - def test_list_samples(self): - data = self.get_json('/samples') - self.assertEqual(7, len(data)) - - def test_query_samples_with_invalid_field_name_and_non_eq_operator(self): - resp = self.get_json('/samples', - q=[{'field': 'non_valid_field_name', - 'op': 'gt', - 'value': 3}], - expect_errors=True) - resp_string = 
jsonutils.loads(resp.body) - fault_string = resp_string['error_message']['faultstring'] - msg = ('Unknown argument: "non_valid_field_name"' - ': unrecognized field in query: ' - '[= res['first_sample_timestamp']) - self.assertIn('last_sample_timestamp', res) - self.assertTrue(last.isoformat() <= res['last_sample_timestamp']) - - def test_instance_no_metadata(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata=None, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], timestamp, timestamp) - - def test_instances(self): - timestamps = { - 'resource-id': datetime.datetime(2012, 7, 2, 10, 40), - 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41), - } - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamps['resource-id'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=timestamps['resource-id-alternate'], - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources') - self.assertEqual(2, len(data)) - for res in data: - timestamp = timestamps.get(res['resource_id']) - self._verify_resource_timestamps(res, timestamp, timestamp) - - def test_instance_multiple_samples(self): - timestamps = [ - datetime.datetime(2012, 7, 2, 10, 41), - datetime.datetime(2012, 7, 2, 10, 42), - datetime.datetime(2012, 7, 2, 10, 40), - ] - for timestamp in timestamps: - datapoint = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=timestamp, - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample-%s' % timestamp, - }, - source='test', - ) - msg = utils.meter_message_from_counter( - datapoint, - self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - self.assertEqual(1, len(data)) - self._verify_resource_timestamps(data[0], - timestamps[-1], timestamps[1]) - - def test_instances_one(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test', - ) - msg2 = 
utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources/resource-id') - self.assertEqual('resource-id', data['resource_id']) - - def test_with_source(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'source', - 'value': 'test_list_resources', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - sources = [r['source'] for r in data] - self.assertEqual(['test_list_resources'], sources) - - def test_resource_id_with_slash(self): - s = sample.Sample( - 'storage.containers.objects', - 'gauge', - '', - 1, - '19fbed01c21f4912901057021b9e7111', - '45acc90399134206b3b41f3d3a0a06d6', - '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance', - timestamp=datetime.datetime(2012, 7, 2, 10, 40).isoformat(), - resource_metadata={}, - source='test_show_special_resource', - ) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - msg['timestamp'] = datetime.datetime(2012, 7, 2, 10, 40) - self.conn.record_metering_data(msg) - - rid_encoded = '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb%252Fglance' - resp = self.get_json('/resources/%s' % rid_encoded) - self.assertEqual("19fbed01c21f4912901057021b9e7111", resp["user_id"]) - self.assertEqual('29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance', - resp["resource_id"]) - - def test_with_invalid_resource_id(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id-1', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-2', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='test_list_resources', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - resp1 = self.get_json('/resources/resource-id-1') - self.assertEqual("resource-id-1", resp1["resource_id"]) - - resp2 = self.get_json('/resources/resource-id-2') - self.assertEqual("resource-id-2", resp2["resource_id"]) - - resp3 = self.get_json('/resources/resource-id-3', expect_errors=True) - self.assertEqual(404, resp3.status_code) - json_data = resp3.body - if six.PY3: - json_data = json_data.decode('utf-8') - self.assertEqual("Resource 
resource-id-3 Not Found", - json.loads(json_data)['error_message'] - ['faultstring']) - - def test_with_user(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'user_id', - 'value': 'user-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_project(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', q=[{'field': 'project_id', - 'value': 'project-id', - }]) - ids = [r['resource_id'] for r in data] - self.assertEqual(['resource-id'], ids) - - def test_with_user_non_admin(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - "X-Project-Id": "project-id2"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(['resource-id-alternate']), ids) - - def test_with_user_wrong_tenant(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project-id2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample1', - }, - source='not-test', - ) - msg2 = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - data = self.get_json('/resources', - headers={"X-Roles": "Member", - "X-Project-Id": "project-wrong"}) - ids = set(r['resource_id'] for r in data) - self.assertEqual(set(), ids) - - def test_metadata(self): - sample1 = sample.Sample( - 'instance', - 
'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key.$1': {'$key': 'val'}}, - 'not_ignored_list': ['returned'], - }, - source='test', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - metadata = data[0]['metadata'] - self.assertEqual([(u'dict_properties.key:$1:$key', u'val'), - (u'display_name', u'test-server'), - (u'not_ignored_list', u"['returned']"), - (u'tag', u'self.sample')], - list(sorted(six.iteritems(metadata)))) - - def test_resource_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources') - links = data[0]['links'] - self.assertEqual(2, len(links)) - self.assertEqual('self', links[0]['rel']) - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - self.assertEqual('instance', links[1]['rel']) - self.assertTrue((self.PATH_PREFIX + '/meters/instance?' - 'q.field=resource_id&q.value=resource-id') - in links[1]['href']) - - def test_resource_skip_meter_links(self): - sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project-id', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - }, - source='test_list_resources', - ) - msg = utils.meter_message_from_counter( - sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - data = self.get_json('/resources?meter_links=0') - links = data[0]['links'] - self.assertEqual(len(links), 1) - self.assertEqual(links[0]['rel'], 'self') - self.assertTrue((self.PATH_PREFIX + '/resources/resource-id') - in links[0]['href']) - - -class TestListResourcesRestriction(v2.FunctionalTest): - def setUp(self): - super(TestListResourcesRestriction, self).setUp() - self.CONF.set_override('default_api_return_limit', 10, group='api') - for i in range(20): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id%s' % i, - timestamp=(datetime.datetime(2012, 9, 25, 10, 30) + - datetime.timedelta(seconds=i)), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_resource_limit(self): - data = self.get_json('/resources?limit=1') - self.assertEqual(1, len(data)) - - def test_resource_limit_negative(self): - self.assertRaises(webtest.app.AppError, self.get_json, - '/resources?limit=-2') - - def test_resource_limit_bigger(self): - data = self.get_json('/resources?limit=42') - self.assertEqual(20, len(data)) - - def test_resource_default_limit(self): - data = self.get_json('/resources') - self.assertEqual(10, len(data)) diff --git a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py 
b/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py deleted file mode 100644 index 7134a8ca..00000000 --- a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test listing raw samples. -""" - -import datetime - -import mock -from oslo_utils import timeutils -import six - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests.functional.api import v2 - - -class TestListSamples(v2.FunctionalTest): - - def setUp(self): - super(TestListSamples, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42) - self.sample1 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 7, 2, 10, 40), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample', - 'dict_properties': {'key': 'value'}, - 'not_ignored_list': ['returned'], - }, - source='test_source', - ) - msg = utils.meter_message_from_counter( - self.sample1, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - self.sample2 = sample.Sample( - 'instance', - 'cumulative', - '', - 1, - 'user-id2', - 'project2', - 'resource-id-alternate', - timestamp=datetime.datetime(2012, 7, 2, 10, 41), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.sample2', - }, - source='source2', - ) - msg2 = utils.meter_message_from_counter( - self.sample2, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg2) - - def test_all(self): - data = self.get_json('/meters/instance') - self.assertEqual(2, len(data)) - for s in data: - self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at']) - - def test_all_trailing_slash(self): - data = self.get_json('/meters/instance/') - self.assertEqual(2, len(data)) - - def test_empty_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'no-such-project', - }]) - self.assertEqual([], data) - - def test_by_project(self): - data = self.get_json('/meters/instance', - q=[{'field': 'project_id', - 'value': 'project1', - }]) - self.assertEqual(1, len(data)) - - def test_empty_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'no-such-resource', - }]) - self.assertEqual([], data) - - def test_by_resource(self): - data = self.get_json('/meters/instance', - q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(1, len(data)) - - def test_empty_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 'no-such-source', - }]) - self.assertEqual(0, len(data)) - - def test_by_source(self): - data = self.get_json('/meters/instance', - q=[{'field': 'source', - 'value': 
'test_source',
-                                 }])
-        self.assertEqual(1, len(data))
-
-    def test_empty_user(self):
-        data = self.get_json('/meters/instance',
-                             q=[{'field': 'user_id',
-                                 'value': 'no-such-user',
-                                 }])
-        self.assertEqual([], data)
-
-    def test_by_user(self):
-        data = self.get_json('/meters/instance',
-                             q=[{'field': 'user_id',
-                                 'value': 'user-id',
-                                 }])
-        self.assertEqual(1, len(data))
-
-    def test_metadata(self):
-        data = self.get_json('/meters/instance',
-                             q=[{'field': 'resource_id',
-                                 'value': 'resource-id',
-                                 }])
-        sample = data[0]
-        self.assertIn('resource_metadata', sample)
-        self.assertEqual(
-            [('dict_properties.key', 'value'),
-             ('display_name', 'test-server'),
-             ('not_ignored_list', "['returned']"),
-             ('tag', 'self.sample'),
-             ],
-            list(sorted(six.iteritems(sample['resource_metadata']))))
diff --git a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py b/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
deleted file mode 100644
index 033f2925..00000000
--- a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
+++ /dev/null
@@ -1,367 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test posting samples.
-"""
-
-import copy
-import datetime
-import os
-
-import mock
-from oslo_utils import fileutils
-from oslo_utils import timeutils
-from oslotest import mockpatch
-import six
-
-from ceilometer.tests.functional.api import v2
-
-
-class TestPostSamples(v2.FunctionalTest):
-    def fake_notifier_sample(self, ctxt, event_type, payload):
-        samples = payload['samples']
-        for m in samples:
-            del m['message_signature']
-        self.published.append(samples)
-
-    def _make_app(self, enable_acl=False):
-        content = ('{"context_is_project": "project_id:%(project_id)s",'
-                   '"default" : "!",'
-                   '"telemetry:create_samples": ""}')
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='policy',
-                                                    suffix='.json')
-        self.CONF.set_override("policy_file", self.tempfile,
-                               group='oslo_policy')
-        return super(TestPostSamples, self)._make_app()
-
-    def tearDown(self):
-        os.remove(self.tempfile)
-        super(TestPostSamples, self).tearDown()
-
-    def setUp(self):
-        self.published = []
-        notifier = mock.Mock()
-        notifier.sample.side_effect = self.fake_notifier_sample
-        self.useFixture(mockpatch.Patch('oslo_messaging.Notifier',
-                                        return_value=notifier))
-        super(TestPostSamples, self).setUp()
-
-    def test_one(self):
-        s1 = [{'counter_name': 'apples',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-        data = self.post_json('/meters/apples/', s1)
-
-        # timestamp not given so it is generated.
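-        # The API echoes the accepted samples back, so the generated
-        # fields are copied into the expectation before comparing.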
-        s1[0]['timestamp'] = data.json[0]['timestamp']
-        # Ignore message id that is randomly generated
-        s1[0]['message_id'] = data.json[0]['message_id']
-        # source is generated if not provided.
-        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
-        self.assertEqual(s1, data.json)
-        self.assertEqual(s1[0], self.published[0][0])
-
-    def test_nested_metadata(self):
-        s1 = [{'counter_name': 'apples',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'nest.name1': 'value1',
-                                     'name2': 'value2',
-                                     'nest.name2': 'value3'}}]
-
-        data = self.post_json('/meters/apples/', s1)
-
-        # timestamp not given so it is generated.
-        s1[0]['timestamp'] = data.json[0]['timestamp']
-        # Ignore message id that is randomly generated
-        s1[0]['message_id'] = data.json[0]['message_id']
-        # source is generated if not provided.
-        s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
-        unwound = copy.copy(s1[0])
-        unwound['resource_metadata'] = {'nest': {'name1': 'value1',
-                                                 'name2': 'value3'},
-                                        'name2': 'value2'}
-        # only the published sample should be unwound, not the representation
-        # in the API response
-        self.assertEqual(s1[0], data.json[0])
-        self.assertEqual(unwound, self.published[0][0])
-
-    def test_invalid_counter_type(self):
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'INVALID_TYPE',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_messsage_id_provided(self):
-        """Do not accept samples with a message_id."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'message_id': 'evil',
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True)
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_wrong_project_id(self):
-        """Do not accept cross posting samples to different projects."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True,
-                              headers={
-                                  "X-Roles": "Member",
-                                  "X-Tenant-Name": "lu-tenant",
-                                  "X-Project-Id":
-                                  "bc23a9d531064583ace8f67dad60f6bb",
-                              })
-
-        self.assertEqual(400, data.status_int)
-        self.assertEqual(0, len(self.published))
-
-    def test_multiple_samples(self):
-        """Send multiple samples.
-
-        The use case here is to reduce the chatter and send the counters
-        at a slower cadence.
-        """
-        samples = []
-        for x in range(6):
-            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
-            s = {'counter_name': 'apples',
-                 'counter_type': 'gauge',
-                 'counter_unit': 'instance',
-                 'counter_volume': float(x * 3),
-                 'source': 'evil',
-                 'timestamp': dt.isoformat(),
-                 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-                 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-                 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-                 'resource_metadata': {'name1': str(x),
-                                       'name2': str(x + 4)}}
-            samples.append(s)
-
-        data = self.post_json('/meters/apples/', samples)
-
-        for x, s in enumerate(samples):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (s['project_id'],
-                                     s['source'])
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-
-            # remove tzinfo to compare generated timestamp
-            # with the provided one
-            c = data.json[x]
-            timestamp = timeutils.parse_isotime(c['timestamp'])
-            c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
-            # do the same on the pipeline
-            msg = self.published[0][x]
-            timestamp = timeutils.parse_isotime(msg['timestamp'])
-            msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
-            self.assertEqual(s, c)
-            self.assertEqual(s, self.published[0][x])
-
-    def test_missing_mandatory_fields(self):
-        """Do not accept posting samples with missing mandatory fields."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        # one by one try posting without a mandatory field.
-        for m in ['counter_volume', 'counter_unit', 'counter_type',
-                  'resource_id', 'counter_name']:
-            s_broke = copy.copy(s1)
-            del s_broke[0][m]
-            print('posting without %s' % m)
-            data = self.post_json('/meters/my_counter_name', s_broke,
-                                  expect_errors=True)
-            self.assertEqual(400, data.status_int)
-
-    def test_multiple_project_id_and_admin(self):
-        """An admin is allowed to set multiple project_id values."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               },
-              {'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 2,
-               'source': 'closedstack',
-               'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              headers={"X-Roles": "admin"})
-
-        self.assertEqual(201, data.status_int)
-        for x, s in enumerate(s1):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (s['project_id'],
-                                     'closedstack')
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-            # timestamp not given so it is generated.
-            s['timestamp'] = data.json[x]['timestamp']
-            s.setdefault('resource_metadata', dict())
-            self.assertEqual(s, data.json[x])
-            self.assertEqual(s, self.published[0][x])
-
-    def test_multiple_samples_multiple_sources(self):
-        """Test posting with special conditions.
-
-        Accept a single post whose samples carry multiple sources, some of
-        them null.
-        """
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'paperstack',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               },
-              {'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 5,
-               'source': 'waterstack',
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               },
-              {'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 2,
-               'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
-               'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True)
-        self.assertEqual(201, data.status_int)
-        for x, s in enumerate(s1):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (
-                s['project_id'],
-                s.get('source', self.CONF.sample_source)
-            )
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-            # timestamp not given so it is generated.
-            s['timestamp'] = data.json[x]['timestamp']
-            s.setdefault('resource_metadata', dict())
-            self.assertEqual(s, data.json[x])
-            self.assertEqual(s, self.published[0][x])
-
-    def test_missing_project_user_id(self):
-        """Ensure missing project & user IDs are defaulted appropriately."""
-        s1 = [{'counter_name': 'my_counter_name',
-               'counter_type': 'gauge',
-               'counter_unit': 'instance',
-               'counter_volume': 1,
-               'source': 'closedstack',
-               'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
-               'resource_metadata': {'name1': 'value1',
-                                     'name2': 'value2'}}]
-
-        project_id = 'bc23a9d531064583ace8f67dad60f6bb'
-        user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff'
-        data = self.post_json('/meters/my_counter_name/', s1,
-                              expect_errors=True,
-                              headers={
-                                  'X-Roles': 'chief-bottle-washer',
-                                  'X-Project-Id': project_id,
-                                  'X-User-Id': user_id,
-                              })
-
-        self.assertEqual(201, data.status_int)
-        for x, s in enumerate(s1):
-            # source is modified to include the project_id.
-            s['source'] = '%s:%s' % (project_id,
                                     s['source'])
-            # Ignore message id that is randomly generated
-            s['message_id'] = data.json[x]['message_id']
-            # timestamp not given so it is generated.
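-            # user_id and project_id default to the X-User-Id and
-            # X-Project-Id headers sent above, as asserted below.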
-            s['timestamp'] = data.json[x]['timestamp']
-            s['user_id'] = user_id
-            s['project_id'] = project_id
-
-            self.assertEqual(s, data.json[x])
-            self.assertEqual(s, self.published[0][x])
diff --git a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py b/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
deleted file mode 100644
index cafa1c80..00000000
--- a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
+++ /dev/null
@@ -1,1693 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test meter statistics retrieval."""
-
-import datetime
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests import db as tests_db
-from ceilometer.tests.functional.api import v2
-
-
-class TestMaxProjectVolume(v2.FunctionalTest):
-    PATH = '/meters/volume.size/statistics'
-
-    def setUp(self):
-        super(TestMaxProjectVolume, self).setUp()
-        for i in range(3):
-            s = sample.Sample(
-                'volume.size',
-                'gauge',
-                'GiB',
-                5 + i,
-                'user-id',
-                'project1',
-                'resource-id-%s' % i,
-                timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
-                resource_metadata={'display_name': 'test-volume',
-                                   'tag': 'self.sample',
-                                   },
-                source='source1',
-            )
-            msg = utils.meter_message_from_counter(
-                s, self.CONF.publisher.telemetry_secret,
-            )
-            self.conn.record_metering_data(msg)
-
-    def test_no_time_bounds(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            }])
-        self.assertEqual(7, data[0]['max'])
-        self.assertEqual(3, data[0]['count'])
-
-    def test_start_timestamp(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'ge',
-                                            'value': '2012-09-25T11:30:00',
-                                            },
-                                           ])
-        self.assertEqual(7, data[0]['max'])
-        self.assertEqual(2, data[0]['count'])
-
-    def test_start_timestamp_after(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'ge',
-                                            'value': '2012-09-25T12:34:00',
-                                            },
-                                           ])
-        self.assertEqual([], data)
-
-    def test_end_timestamp(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'le',
-                                            'value': '2012-09-25T11:30:00',
-                                            },
-                                           ])
-        self.assertEqual(5, data[0]['max'])
-        self.assertEqual(1, data[0]['count'])
-
-    def test_end_timestamp_before(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'le',
-                                            'value': '2012-09-25T09:54:00',
-                                            },
-                                           ])
-        self.assertEqual([], data)
-
-    def test_start_end_timestamp(self):
-        data = self.get_json(self.PATH, q=[{'field': 'project_id',
-                                            'value': 'project1',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'ge',
-                                            'value': '2012-09-25T11:30:00',
-                                            },
-                                           {'field': 'timestamp',
-                                            'op': 'le',
-                                            'value': '2012-09-25T11:32:00',
-                                            },
-                                           ])
-        self.assertEqual(6, data[0]['max'])
-        self.assertEqual(1, data[0]['count'])
-
-
-class
TestMaxResourceVolume(v2.FunctionalTest): - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestMaxResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=3600) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(3600, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_period_with_negative_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=-1) - self.assertEqual(400, resp.status_code) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_period_with_large_value(self): - resp = self.get_json(self.PATH, expect_errors=True, - q=[{'field': 'user_id', - 'value': 'user-id'}], - period=10000000000000) - self.assertEqual(400, resp.status_code) - self.assertIn(b"Invalid period", resp.body) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(7, data[0]['max']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['max']) - self.assertEqual(1, data[0]['count']) - - -class TestSumProjectVolume(v2.FunctionalTest): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumProjectVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 
'resource-id-%s' % i, - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }]) - expected = 5 + 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(3, data[0]['count']) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - ]) - expected = 6 + 7 - self.assertEqual(expected, data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }, - ]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }, - ]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }, - ]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'project_id', - 'value': 'project1', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:32:00', - }, - ]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestSumResourceVolume(v2.FunctionalTest): - - PATH = '/meters/volume.size/statistics' - - def setUp(self): - super(TestSumResourceVolume, self).setUp() - for i in range(3): - s = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.sample', - }, - source='source1', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_no_time_bounds(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }]) - self.assertEqual(5 + 6 + 7, data[0]['sum']) - self.assertEqual(3, data[0]['count']) - - def test_no_time_bounds_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}], - period=1800) - self.assertEqual(3, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00', - u'2012-09-25T11:31:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(1800, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T11:30:00', - u'2012-09-25T12:30:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(6 + 7, 
data[0]['sum']) - self.assertEqual(2, data[0]['count']) - - def test_start_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'resource_id', - 'value': 'resource-id'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T10:15:00'}], - period=7200) - self.assertEqual(2, len(data)) - self.assertEqual(set([u'2012-09-25T10:30:00', - u'2012-09-25T12:32:00']), - set(x['duration_start'] for x in data)) - self.assertEqual(7200, data[0]['period']) - self.assertEqual(set([u'2012-09-25T10:15:00', - u'2012-09-25T12:15:00']), - set(x['period_start'] for x in data)) - - def test_start_timestamp_after(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T12:34:00', - }]) - self.assertEqual([], data) - - def test_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T11:30:00', - }]) - self.assertEqual(5, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - def test_end_timestamp_before(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'le', - 'value': '2012-09-25T09:54:00', - }]) - self.assertEqual([], data) - - def test_start_end_timestamp(self): - data = self.get_json(self.PATH, q=[{'field': 'resource_id', - 'value': 'resource-id', - }, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2012-09-25T11:30:00', - }, - {'field': 'timestamp', - 'op': 'lt', - 'value': '2012-09-25T11:32:00', - }]) - self.assertEqual(6, data[0]['sum']) - self.assertEqual(1, data[0]['count']) - - -class TestGroupByInstance(v2.FunctionalTest): - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupByInstance, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - 
project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - data = self.get_json(self.PATH, groupby=['user_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_resource(self): - data = self.get_json(self.PATH, groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_project(self): - data = self.get_json(self.PATH, groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - 
self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_unknown_field(self): - response = self.get_json(self.PATH, - expect_errors=True, - groupby=['wtf']) - self.assertEqual(400, response.status_code) - - def test_group_by_multiple_regular(self): - data = self.get_json(self.PATH, groupby=['user_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'user_id': 'user-1', - 'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-2'}) - self.assertNotEqual(grp, {'user_id': 'user-1', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-2', - 'resource_id': 'resource-3'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-1'}) - self.assertNotEqual(grp, {'user_id': 'user-3', - 'resource_id': 'resource-2'}) - - def test_group_by_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def 
test_group_by_with_query_filter_multiple(self): - data = self.get_json(self.PATH, - q=[{'field': 'user_id', - 'op': 'eq', - 'value': 'user-2'}, - {'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id', 'resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2', - 'resource-1', 'resource-2']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-1', - 'resource_id': 'resource-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - elif grp == {'project_id': 'project-2', - 'resource_id': 'resource-2'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - else: - self.assertNotEqual(grp, {'project_id': 'project-2', - 'resource_id': 'resource-1'}) - - def test_group_by_with_period(self): - data = self.get_json(self.PATH, - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T11:22:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(4260, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, 
r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - - def test_group_by_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:11:00', - u'2013-08-01T14:11:00', - u'2013-08-01T16:11:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:11:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:11:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:11:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:11:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - 
self.assertEqual(u'2013-08-01T18:11:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:11:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:11:00']) - - def test_group_by_start_timestamp_after(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T17:28:01'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_end_timestamp_before(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T10:10:59'}], - groupby=['project_id']) - self.assertEqual([], data) - - def test_group_by_start_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:58:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T11:45:00'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(3, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(2, r['avg']) - - def test_group_by_start_end_timestamp(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T08:17:03'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T23:59:59'}], - groupby=['project_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'project_id': 'project-1'}: - self.assertEqual(5, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(10, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'project_id': 'project-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - 
self.assertEqual(4, r['max']) - self.assertEqual(6, r['sum']) - self.assertEqual(3, r['avg']) - - def test_group_by_start_end_timestamp_with_query_filter(self): - data = self.get_json(self.PATH, - q=[{'field': 'project_id', - 'op': 'eq', - 'value': 'project-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T11:01:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T20:00:00'}], - groupby=['resource_id']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'resource_id': 'resource-1'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'resource_id': 'resource-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - def test_group_by_start_end_timestamp_with_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T14:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T17:00:00'}], - groupby=['project_id'], - period=3600) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T14:00:00', - u'2013-08-01T15:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T15:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_start']) - self.assertEqual(u'2013-08-01T16:10:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T17:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T15:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - 
self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_start']) - self.assertEqual(u'2013-08-01T15:37:00', r['duration_end']) - self.assertEqual(3600, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T15:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T16:00:00']) - - def test_group_by_start_end_timestamp_with_query_filter_and_period(self): - data = self.get_json(self.PATH, - q=[{'field': 'source', - 'op': 'eq', - 'value': 'source-1'}, - {'field': 'timestamp', - 'op': 'ge', - 'value': '2013-08-01T10:00:00'}, - {'field': 'timestamp', - 'op': 'le', - 'value': '2013-08-01T18:00:00'}], - groupby=['project_id'], - period=7200) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set(sub_dict['period_start'] for sub_dict in data) - period_start_valid = set([u'2013-08-01T10:00:00', - u'2013-08-01T14:00:00', - u'2013-08-01T16:00:00']) - self.assertEqual(period_start_valid, period_start_set) - - for r in data: - grp = r['groupby'] - period_start = r['period_start'] - if (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T10:00:00'): - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(1, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(1, r['avg']) - self.assertEqual(1740, r['duration']) - self.assertEqual(u'2013-08-01T10:11:00', r['duration_start']) - self.assertEqual(u'2013-08-01T10:40:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T12:00:00', r['period_end']) - elif (grp == {'project_id': 'project-1'} and - period_start == u'2013-08-01T14:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(2, r['sum']) - self.assertEqual(2, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_start']) - self.assertEqual(u'2013-08-01T14:59:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T16:00:00', r['period_end']) - elif (grp == {'project_id': 'project-2'} and - period_start == u'2013-08-01T16:00:00'): - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - self.assertEqual(0, r['duration']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_start']) - self.assertEqual(u'2013-08-01T17:28:00', r['duration_end']) - self.assertEqual(7200, r['period']) - self.assertEqual(u'2013-08-01T18:00:00', r['period_end']) - else: - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-1'}, - u'2013-08-01T16:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T10:00:00']) - self.assertNotEqual([grp, period_start], - [{'project_id': 'project-2'}, - u'2013-08-01T14:00:00']) - - -@tests_db.run_with('mongodb', 'hbase') 
-class TestGroupBySource(v2.FunctionalTest): - - # FIXME(terriyu): We have to put test_group_by_source in its own class - # because SQLAlchemy currently doesn't support group by source statistics. - # When group by source is supported in SQLAlchemy, this test should be - # moved to TestGroupByInstance with all the other group by statistics - # tests. - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestGroupBySource, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def tearDown(self): - self.conn.clear() - super(TestGroupBySource, self).tearDown() - - def test_group_by_source(self): - data = self.get_json(self.PATH, groupby=['source']) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - - for r in data: - grp = r['groupby'] - if grp == {'source': 'source-1'}: - self.assertEqual(4, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(1, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(8, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 'source-2'}: - self.assertEqual(2, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(2, r['min']) - self.assertEqual(2, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(2, r['avg']) - elif grp == {'source': 
'source-3'}: - self.assertEqual(1, r['count']) - self.assertEqual('s', r['unit']) - self.assertEqual(4, r['min']) - self.assertEqual(4, r['max']) - self.assertEqual(4, r['sum']) - self.assertEqual(4, r['avg']) - - -class TestSelectableAggregates(v2.FunctionalTest): - - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestSelectableAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def _do_test_per_tenant_selectable_standard_aggregate(self, - aggregate, - expected_values): - agg_args = {'aggregate.func': aggregate} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertAlmostEqual(r[aggregate], expected) - self.assertIn('aggregate', r) - 
self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], expected) - for a in standard_aggregates - set([aggregate]): - self.assertNotIn(a, r) - - def test_per_tenant_selectable_max(self): - self._do_test_per_tenant_selectable_standard_aggregate('max', - [5, 4, 9]) - - def test_per_tenant_selectable_min(self): - self._do_test_per_tenant_selectable_standard_aggregate('min', - [2, 1, 9]) - - def test_per_tenant_selectable_sum(self): - self._do_test_per_tenant_selectable_standard_aggregate('sum', - [9, 9, 9]) - - def test_per_tenant_selectable_avg(self): - self._do_test_per_tenant_selectable_standard_aggregate('avg', - [3, 2.25, 9]) - - def test_per_tenant_selectable_count(self): - self._do_test_per_tenant_selectable_standard_aggregate('count', - [3, 4, 1]) - - def test_per_tenant_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'cardinality/resource_id' - expected_values = [2.0, 3.0, 1.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_large_quantum_selectable_parameterized_aggregate(self): - # add a large number of datapoints that won't impact on cardinality - # if the computation logic is tolerant of different DB behavior on - # larger numbers of samples per-period - for i in range(200): - s = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=i * 1.0, - user_id='user-1', - project_id='project-1', - resource_id='resource-1', - timestamp=datetime.datetime(2013, 8, 1, 11, i % 60), - resource_metadata={'flavor': 'm1.tiny', - 'event': 'event-1', }, - source='source', - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'resource_id'} - data = self.get_json(self.PATH, **agg_args) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_repeated_unparameterized_aggregate(self): - agg_params = 'aggregate.func=count&aggregate.func=count' - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'count' - expected_value = 8.0 - standard_aggregates = set(['min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn(aggregate, r) - self.assertEqual(expected_value, r[aggregate]) - self.assertIn('aggregate', r) - 
self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_fully_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 'aggregate.func=cardinality&' - 'aggregate.param=resource_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - aggregate = 'cardinality/resource_id' - expected_value = 5.0 - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_value, r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_partially_repeated_parameterized_aggregate(self): - agg_params = ('aggregate.func=cardinality&' - 'aggregate.param=resource_id&' - 'aggregate.func=cardinality&' - 'aggregate.param=project_id&') - data = self.get_json(self.PATH, override_params=agg_params) - - expected_values = {'cardinality/resource_id': 5.0, - 'cardinality/project_id': 3.0} - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - r = data[0] - self.assertIn('aggregate', r) - for aggregate in expected_values.keys(): - self.assertNotIn(aggregate, r) - self.assertIn(aggregate, r['aggregate']) - self.assertEqual(expected_values[aggregate], - r['aggregate'][aggregate]) - for a in standard_aggregates: - self.assertNotIn(a, r) - - def test_bad_selectable_parameterized_aggregate(self): - agg_args = {'aggregate.func': 'cardinality', - 'aggregate.param': 'injection_attack'} - resp = self.get_json(self.PATH, status=[400], - groupby=['project_id'], **agg_args) - self.assertIn('error_message', resp) - self.assertEqual(resp['error_message'].get('faultcode'), - 'Client') - self.assertEqual(resp['error_message'].get('faultstring'), - 'Bad aggregate: cardinality.injection_attack') - - -@tests_db.run_with('mongodb', 'hbase') -class TestUnparameterizedAggregates(v2.FunctionalTest): - - # We put the stddev test case in a separate class so that we - # can easily exclude the sqlalchemy scenario, as sqlite doesn't - # support the stddev_pop function and fails ungracefully with - # OperationalError when it is used. However we still want to - # test the corresponding functionality in the mongo driver. - # For hbase, the skip on NotImplementedError logic works - # in the usual way. 
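For context on the class this comment introduces: a minimal sketch of the request shape it exercises, using the `requests` library. The endpoint URL and port are illustrative assumptions (the tests above run against an in-process WSGI app instead); the query parameters mirror the removed test exactly.

```python
import requests

# Assumed endpoint for illustration only.
URL = 'http://localhost:8777/v2/meters/instance/statistics'

resp = requests.get(URL, params={'groupby': 'project_id',
                                 'aggregate.func': 'stddev'})
for bucket in resp.json():
    # Selectable aggregates come back under the 'aggregate' key, not as
    # top-level fields like 'min'/'max' -- the removed tests assert this.
    print(bucket['groupby'], bucket['aggregate']['stddev'])
```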
- - PATH = '/meters/instance/statistics' - - def setUp(self): - super(TestUnparameterizedAggregates, self).setUp() - - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11), - 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 5, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source'}, - {'volume': 9, 'user': 'user-3', 'project': 'project-3', - 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3', - 'source': 'source'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_GAUGE, - unit='instance', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], }, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_per_tenant_selectable_unparameterized_aggregate(self): - agg_args = {'aggregate.func': 'stddev'} - data = self.get_json(self.PATH, groupby=['project_id'], **agg_args) - groupby_keys_set = set(x for sub_dict in data - for x in sub_dict['groupby'].keys()) - groupby_vals_set = set(x for sub_dict in data - for x in sub_dict['groupby'].values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - projects = ['project-1', 'project-2', 'project-3'] - self.assertEqual(set(projects), groupby_vals_set) - - aggregate = 'stddev' - expected_values = [1.4142, 1.0897, 0.0] - standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg']) - for r in data: - grp = r['groupby'] - for project in projects: - if grp == {'project_id': project}: - expected = expected_values[projects.index(project)] - self.assertEqual('instance', r['unit']) - self.assertNotIn(aggregate, r) - self.assertIn('aggregate', r) - self.assertIn(aggregate, r['aggregate']) - self.assertAlmostEqual(r['aggregate'][aggregate], - expected, - places=4) - for a in standard_aggregates: - self.assertNotIn(a, r) - - -@tests_db.run_with('mongodb') -class TestBigValueStatistics(v2.FunctionalTest): - 
-
-    PATH = '/meters/volume.size/statistics'
-
-    def setUp(self):
-        super(TestBigValueStatistics, self).setUp()
-        for i in range(0, 3):
-            s = sample.Sample(
-                'volume.size',
-                'gauge',
-                'GiB',
-                (i + 1) * (10 ** 12),
-                'user-id',
-                'project1',
-                'resource-id',
-                timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
-                resource_metadata={'display_name': 'test-volume',
-                                   'tag': 'self.sample',
-                                   },
-                source='source1',
-            )
-            msg = utils.meter_message_from_counter(
-                s, self.CONF.publisher.telemetry_secret,
-            )
-            self.conn.record_metering_data(msg)
-
-    def test_big_value_statistics(self):
-        data = self.get_json(self.PATH)
-
-        expected_values = {'count': 3,
-                           'min': 10 ** 12,
-                           'max': 3 * 10 ** 12,
-                           'sum': 6 * 10 ** 12,
-                           'avg': 2 * 10 ** 12}
-        self.assertEqual(1, len(data))
-        for d in data:
-            for name, expected_value in expected_values.items():
-                self.assertIn(name, d)
-                self.assertEqual(expected_value, d[name])
diff --git a/ceilometer/tests/functional/gabbi/fixtures.py b/ceilometer/tests/functional/gabbi/fixtures.py
index a8e81059..44af6adc 100644
--- a/ceilometer/tests/functional/gabbi/fixtures.py
+++ b/ceilometer/tests/functional/gabbi/fixtures.py
@@ -17,7 +17,6 @@
 import datetime
 import os
-import random
 from unittest import case
 import uuid
 
@@ -30,8 +29,6 @@
 import six
 from six.moves.urllib import parse as urlparse
 
 from ceilometer.event.storage import models
-from ceilometer.publisher import utils
-from ceilometer import sample
 from ceilometer import storage
 
 # TODO(chdent): For now only MongoDB is supported, because of easy
@@ -63,8 +60,6 @@ class ConfigFixture(fixture.GabbiFixture):
         self.conf([], project='ceilometer', validate_default_values=True)
         opts.set_defaults(self.conf)
         conf.import_group('api', 'ceilometer.api.controllers.v2.root')
-        conf.import_opt('store_events', 'ceilometer.notification',
-                        group='notification')
 
         content = ('{"default": ""}')
         if six.PY3:
@@ -81,20 +76,11 @@ class ConfigFixture(fixture.GabbiFixture):
             'ceilometer/tests/functional/gabbi/gabbi_paste.ini')
         )
 
-        # A special pipeline is required to use the direct publisher.
-        conf.set_override('pipeline_cfg_file',
-                          'ceilometer/tests/functional/gabbi_pipeline.yaml')
-
         database_name = '%s-%s' % (db_url, str(uuid.uuid4()))
         conf.set_override('connection', database_name, group='database')
-        conf.set_override('metering_connection', '', group='database')
         conf.set_override('event_connection', '', group='database')
 
         conf.set_override('pecan_debug', True, group='api')
-        conf.set_override('gnocchi_is_enabled', False, group='api')
-        conf.set_override('aodh_is_enabled', False, group='api')
-
-        conf.set_override('store_events', True, group='notification')
 
     def stop_fixture(self):
         """Reset the config and remove data."""
@@ -103,50 +89,13 @@
         self.conf.reset()
 
 
-class SampleDataFixture(fixture.GabbiFixture):
-    """Instantiate some sample data for use in testing."""
-
-    def start_fixture(self):
-        """Create some samples."""
-        conf = fixture_config.Config().conf
-        self.conn = storage.get_connection_from_config(conf)
-        timestamp = datetime.datetime.utcnow()
-        project_id = str(uuid.uuid4())
-        self.source = str(uuid.uuid4())
-        resource_metadata = {'farmed_by': 'nancy'}
-
-        for name in ['cow', 'pig', 'sheep']:
-            resource_metadata.update({'breed': name}),
-            c = sample.Sample(name='livestock',
-                              type='gauge',
-                              unit='head',
-                              volume=int(10 * random.random()),
-                              user_id='farmerjon',
-                              project_id=project_id,
-                              resource_id=project_id,
-                              timestamp=timestamp,
-                              resource_metadata=resource_metadata,
-                              source=self.source)
-            data = utils.meter_message_from_counter(
-                c, conf.publisher.telemetry_secret)
-            self.conn.record_metering_data(data)
-
-    def stop_fixture(self):
-        """Destroy the samples."""
-        # NOTE(chdent): print here for sake of info during testing.
-        # This will go away eventually.
-        print('resource',
-              self.conn.db.resource.remove({'source': self.source}))
-        print('meter', self.conn.db.meter.remove({'source': self.source}))
-
-
 class EventDataFixture(fixture.GabbiFixture):
     """Instantiate some sample event data for use in testing."""
 
     def start_fixture(self):
         """Create some events."""
         conf = fixture_config.Config().conf
-        self.conn = storage.get_connection_from_config(conf, 'event')
+        self.conn = storage.get_connection_from_config(conf)
         events = []
         name_list = ['chocolate.chip', 'peanut.butter', 'sugar']
         for ix, name in enumerate(name_list):
diff --git a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml b/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml
deleted file mode 100644
index 35250176..00000000
--- a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# A limited pipeline for use with the Gabbi spike.
-# direct writes to the metering database without using an
-# intermediary dispatcher.
-#
-# This is one of several things that will need some extensive
-# tidying to be more right.
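For context on the file deleted here: gabbi_pipeline.yaml routed every meter through the direct:// publisher so the gabbi tests could write straight to the metering store. The scenario tests removed earlier in this patch achieve the same effect without any pipeline, using the pattern sketched below; the names are lifted from that removed test code, with `conn` and `secret` standing in for the test's storage connection and telemetry secret.

```python
import datetime

from ceilometer.publisher import utils
from ceilometer import sample


def record_direct(conn, secret):
    """Write one sample straight into the metering store, as the removed
    scenario tests do, instead of publishing through a pipeline."""
    s = sample.Sample(
        'volume.size', 'gauge', 'GiB', 5,
        'user-id', 'project1', 'resource-id',
        timestamp=datetime.datetime(2012, 9, 25, 10, 30),
        resource_metadata={'display_name': 'test-volume'},
        source='source1',
    )
    # Sign the sample with the telemetry secret, then record it directly,
    # bypassing any pipeline, publisher, or dispatcher.
    msg = utils.meter_message_from_counter(s, secret)
    conn.record_metering_data(msg)
```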
----
-sources:
-    - name: meter_source
-      interval: 1
-      meters:
-          - "*"
-      sinks:
-          - meter_sink
-sinks:
-    - name: meter_sink
-      transformers:
-      publishers:
-          - direct://
diff --git a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml b/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml
index 5b9c9164..1a44c8d5 100644
--- a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml
+++ b/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml
@@ -11,4 +11,3 @@ tests:
   url: /v2/capabilities
   response_json_paths:
       $.event_storage.['storage:production_ready']: true
-      $.storage.['storage:production_ready']: true
diff --git a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml
deleted file mode 100644
index 0d5927a8..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Post a simple sample, sir, and the retrieve it in various ways.
-fixtures:
-    - ConfigFixture
-
-tests:
-
-# POST one sample and verify its existence.
-
-- name: post sample for meter
-  desc: post a single sample
-  url: /v2/meters/apples?direct=True
-  method: POST
-  request_headers:
-      content-type: application/json
-  data: |
-      [
-          {
-              "counter_name": "apples",
-              "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
-              "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
-              "counter_unit": "instance",
-              "counter_volume": 1,
-              "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
-              "resource_metadata": {
-                  "name2": "value2",
-                  "name1": "value1"
-              },
-              "counter_type": "gauge"
-          }
-      ]
-
-  response_json_paths:
-      $.[0].counter_name: apples
-  status: 201
-  response_headers:
-      content-type: application/json; charset=UTF-8
-
-# When POSTing a sample perhaps we should get back a location header
-# with the URI of the posted sample
-
-- name: post a sample expect location
-  desc: https://bugs.launchpad.net/ceilometer/+bug/1426426
-  xfail: true
-  url: /v2/meters/apples?direct=True
-  method: POST
-  request_headers:
-      content-type: application/json
-  data:
-      - counter_name: apples
-        project_id: 35b17138-b364-4e6a-a131-8f3099c5be68
-        user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff
-        counter_unit: instance
-        counter_volume: 1
-        resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
-        resource_metadata:
-            name2: value2
-            name1: value1
-        counter_type: gauge
-  response_headers:
-      location: /$SCHEME://$NETLOC/
-
-# GET all the samples created for the apples meter
-
-- name: get samples for meter
-  desc: get all the samples at that meter
-  url: /v2/meters/apples
-  response_json_paths:
-      $.[0].counter_name: apples
-      $.[0].counter_volume: 1
-      $.[0].resource_metadata.name2: value2
-
-# POSTing a sample to a meter will implicitly create a resource
-
-- name: get resources
-  desc: get the resources that exist because of the sample
-  url: /v2/resources
-  response_json_paths:
-      $.[0].metadata.name2: value2
-
-# NOTE(chdent): We assume that the first item in links is self.
-# Need to determine how to express the more correct JSONPath here
-# (if possible).
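The NOTE just above leaves a position-independent JSONPath as an open question; the test just below therefore relies on link ordering via `$[0].links[0].href`. In plain Python the intent ("the href of the link whose rel is 'self'") is straightforward; a sketch, assuming the standard links/rel/href layout the API returns:

```python
def self_link(resource):
    """Return the href of the 'self' link, regardless of link order."""
    return next(link['href']
                for link in resource['links']
                if link.get('rel') == 'self')
```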
-
-- name: get resource
-  desc: get just one of those resources via self
-  url: $RESPONSE['$[0].links[0].href']
-  response_json_paths:
-      $.metadata.name2: value2
-
-# GET the created samples
-
-- name: get samples
-  desc: get all the created samples
-  url: /v2/samples
-  response_json_paths:
-      $.[0].metadata.name2: value2
-      $.[0].meter: apples
-
-- name: get one sample
-  desc: get the one sample that exists
-  url: /v2/samples/$RESPONSE['$[0].id']
-  response_json_paths:
-      $.metadata.name2: value2
-      $.meter: apples
diff --git a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml
deleted file mode 100644
index 94369703..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Demonstrate a simple sample fixture.
-#
-fixtures:
-    - ConfigFixture
-    - SampleDataFixture
-
-tests:
-- name: get fixture samples
-  desc: get all the samples at livestock
-  url: /v2/meters/livestock
-  response_json_paths:
-      $.[0].counter_name: livestock
-      $.[1].counter_name: livestock
-      $.[2].counter_name: livestock
-      $.[2].user_id: farmerjon
-      $.[0].resource_metadata.breed: cow
-      $.[1].resource_metadata.farmed_by: nancy
diff --git a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml b/ceilometer/tests/functional/gabbi/gabbits/meters.yaml
deleted file mode 100644
index 65bb45a5..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml
+++ /dev/null
@@ -1,401 +0,0 @@
-#
-# Tests to explore and cover the /v2/meters section of the
-# Ceilometer API.
-#
-
-fixtures:
-    - ConfigFixture
-
-tests:
-
-# Generic HTTP health explorations of all meters.
-
-- name: empty meters list
-  url: /v2/meters
-  response_headers:
-      content-type: /application/json/
-  response_strings:
-      - "[]"
-
-- name: meters list bad accept
-  url: /v2/meters
-  request_headers:
-      accept: text/plain
-  status: 406
-
-- name: meters list bad method
-  url: /v2/meters
-  method: POST
-  status: 405
-  response_headers:
-      allow: GET
-
-- name: try to delete meters
-  url: /v2/meters
-  method: DELETE
-  status: 405
-  response_headers:
-      allow: GET
-
-# Generic HTTP health explorations of single meter.
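The health checks above (and the single-meter ones that follow) probe generic HTTP behaviour rather than telemetry logic. The same assertions expressed with `requests` against a running API; the base URL is an illustrative assumption, since the gabbi tests run against an in-process test app.

```python
import requests

BASE = 'http://localhost:8777'  # assumed endpoint for illustration

# Asking for text/plain is refused: the API only produces JSON.
r = requests.get(BASE + '/v2/meters', headers={'Accept': 'text/plain'})
assert r.status_code == 406

# Only GET is allowed on the meters collection; the Allow header says so.
r = requests.delete(BASE + '/v2/meters')
assert r.status_code == 405
assert r.headers['Allow'] == 'GET'
```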
- - - name: get non exist meter - url: /v2/meters/noexist - response_strings: - - "[]" - - - name: meter bad accept - url: /v2/meters/noexist?direct=True - request_headers: - accept: text/plain - status: 406 - - - name: meter delete noexist - url: /v2/meters/noexist - method: DELETE - status: "404 || 405" - - - name: post meter no data - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - - - name: post meter error is JSON - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: "" - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: "Samples should be included in request body" - - - name: post meter bad content-type - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: text/plain - data: hello - status: 415 - - - name: post bad samples to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - samples: - - red - - blue - - yellow - status: 400 - -# POST variations on a malformed sample - - - name: post limited counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_name" - - - name: post mismatched counter name to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: cars - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_name" - - "should be apples" - - - name: post counter no resource to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - status: 400 - response_strings: - - "Invalid input for field/attribute resource_id" - - "Mandatory field missing." - - - name: post counter bad type to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: elevation - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 400 - response_strings: - - "Invalid input for field/attribute counter_type." 
- - "The counter type must be: gauge, delta, cumulative" - -# Manipulate samples - - - name: post counter to meter - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - status: 201 - - - name: list apple samples - url: /v2/meters/apples - response_json_paths: - $[0].counter_volume: 1.0 - $[0].counter_name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - - - name: list meters - url: /v2/meters - response_json_paths: - $[0].name: apples - $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36 - $[0].type: gauge - $[-1].name: apples - - - name: negative limit on meter list - url: /v2/meters/apples?limit=-5 - status: 400 - response_strings: - - Limit must be positive - - - name: nan limit on meter list - url: /v2/meters/apples?limit=NaN - status: 400 - response_strings: - - unable to convert to int - - - name: post counter to meter different resource - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 2 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - - - name: query for resource - url: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq - response_json_paths: - $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - -# Explore posting samples with less than perfect data. - - - name: post counter with bad timestamp - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-bad 23:23:20" - status: 400 - response_strings: - - 'Invalid input for field/attribute samples' - - - name: post counter with good timestamp - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - - - name: post counter with wrong metadata - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: "a string" - status: 400 - response_strings: - - "Invalid input for field/attribute samples" - - - name: post counter with empty metadata - url: /v2/meters/apples?direct=True - method: POST - status: 201 - request_headers: - content-type: application/json - data: - - counter_name: apples - counter_type: gauge - counter_unit: instance - counter_volume: 3 - resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa - timestamp: "2013-01-01 23:23:20" - resource_metadata: {} - -# Statistics - - - name: get sample statistics - url: /v2/meters/apples/statistics - response_json_paths: - $[0].groupby: null - $[0].unit: instance - $[0].sum: 9.0 - $[0].min: 1.0 - $[0].max: 3.0 - $[0].count: 4 - - - name: get incorrectly grouped sample statistics - url: 
/v2/meters/apples/statistics?groupby=house_id - status: 400 - response_strings: - - Invalid groupby fields - - - name: get grouped sample statistics - url: /v2/meters/apples/statistics?groupby=resource_id - response_json_paths: - $[1].max: 3.0 - $[0].max: 1.0 - - - name: get sample statistics bad period - url: /v2/meters/apples/statistics?period=seven - status: 400 - response_strings: - - unable to convert to int - - - name: get sample statistics negative period - url: /v2/meters/apples/statistics?period=-7 - status: 400 - response_strings: - - Period must be positive. - - - name: get sample statistics 600 period - url: /v2/meters/apples/statistics?period=600 - response_json_paths: - $[0].period: 600 - - - name: get sample statistics time limit not time - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember - status: 400 - response_strings: - - invalid timestamp format - - - name: get sample statistics time limit gt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit lt - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_json_paths: - $[0].count: 2 - - - name: get sample statistics time limit bounded - url: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01 - response_strings: - - "[]" - - - name: get sample statistics select aggregate bad format - url: /v2/meters/apples/statistics?aggregate=max - status: 400 - - - name: get sample statistics select aggregate - url: /v2/meters/apples/statistics?aggregate.func=max - response_json_paths: - $[0].aggregate.max: 3.0 - - - name: get sample statistics select aggregate multiple - url: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=count - response_json_paths: - $[0].aggregate.max: 3.0 - $[0].aggregate.count: 4 - - - name: get sample statistics select aggregate bad function - url: /v2/meters/apples/statistics?aggregate.func=mmm - status: 400 - response_strings: - - 'Invalid aggregation function: mmm' - - - name: get sample statistics select aggregate good function and bad function - url: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=mmm - status: 400 - response_strings: - - 'Invalid aggregation function: mmm' - -# limit meters results - - - name: get meters ulimited - url: /v2/meters - response_json_paths: - $.`len`: 2 - - - name: get meters limited - url: /v2/meters?limit=1 - response_json_paths: - $.`len`: 1 - - - name: get meters double limit - url: /v2/meters?limit=1&limit=1 - status: 400 - - - name: get meters filter limit - desc: expressing limit this way is now disallowed - url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' - - - name: get meters filter limit and limit - url: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml deleted file mode 100644 index 44d407ce..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are no -# resources. 
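The statistics stanzas above pin down the v2 query-string conventions: period buckets samples into fixed windows, groupby accepts only known fields (house_id is rejected with 400), and aggregate.func may be repeated to select several aggregates at once. A sketch of the same calls with requests, under the same local-endpoint assumption as the earlier sketch:

    import requests

    BASE = 'http://localhost:8777'  # assumption: a locally running v2 API

    # one statistics document per 600-second window
    requests.get(BASE + '/v2/meters/apples/statistics',
                 params={'period': 600})

    # repeat aggregate.func for each selectable aggregate wanted; an
    # unknown function such as 'mmm' is rejected with 400
    requests.get(BASE + '/v2/meters/apples/statistics',
                 params=[('aggregate.func', 'max'),
                         ('aggregate.func', 'count')])
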
-# - -fixtures: - - ConfigFixture - -tests: - -# Check for a list of resources, modifying the request in various -# ways. - - - name: list resources no extra - desc: Provide no additional header guidelines - url: /v2/resources - response_headers: - content-type: /application/json/ - response_strings: - - "[]" - - - name: list resources but get url wrong - url: /v2/resrces - status: 404 - - - name: list resources explicit accept - url: /v2/resources - request_headers: - accept: application/json - response_strings: - - "[]" - - - name: list resources bad accept - url: /v2/resources - request_headers: - accept: text/plain - status: 406 - - - name: list resources with bad query field - url: /v2/resources?q.field=id&q.value=cars - status: 400 - response_strings: - - unrecognized field in query - - - name: list resources with query - url: /v2/resources?q.field=resource&q.value=cars - response_strings: - - "[]" - - - name: list resource bad type meter links - url: /v2/resources?meter_links=yes%20please - status: 400 - response_strings: - - unable to convert to int - - - name: list resource meter links int - url: /v2/resources?meter_links=0 - response_strings: - - "[]" diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml deleted file mode 100644 index cf138c27..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] - - - name: list resources limit user_id - url: /v2/resources?q.field=user_id&q.value=farmerjon - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata - url: /v2/resources?q.field=metadata.breed&q.value=sheep - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - - - name: list resources limit metadata no match - url: /v2/resources?q.field=metadata.breed&q.value=llamma - response_strings: - - "[]" - - - name: fail to get one resource - url: /v2/resources/nosirnothere - status: 404 - - - name: list resource meter links present - url: /v2/resources?meter_links=1 - response_json_paths: - $[0].links[0].rel: self - $[0].links[1].rel: livestock - $[0].links[-1].rel: livestock - - - name: list resource meter links not present - url: /v2/resources?meter_links=0 - desc: there is only one links entry when meter_links is 0 - response_json_paths: - $[0].links[0].rel: self - $[0].links[-1].rel: self - -# limit resource results - - - name: get resources ulimited - url: /v2/resources - response_json_paths: - $.`len`: 1 - - - name: get resources limited - url: /v2/resources?limit=1 - response_json_paths: - $.`len`: 1 - - - name: get resources double limit - url: /v2/resources?limit=1&limit=1 - status: 400 - - - name: get resources filter limit - desc: expressing limit this way is now disallowed - url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' 
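The $RESPONSE['...'] substitutions in the fixtured tests above feed a JSONPath result from the previous response into the next request; that is how "get one resource" follows the self link returned by "list all resources". Hand-rolled, the chaining amounts to this (same hypothetical endpoint as the sketches above):

    import requests

    BASE = 'http://localhost:8777'  # assumption: a locally running v2 API

    resources = requests.get(BASE + '/v2/resources').json()
    # links[0] is the 'self' link, per the assertions above
    self_href = resources[0]['links'][0]['href']
    one = requests.get(self_href).json()
    assert one['resource_id'] == resources[0]['resource_id']
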
- - - name: get resources filter limit and limit - url: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1 - status: 400 - response_strings: - - 'Unknown argument: \"limit\": unrecognized field in query' diff --git a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/samples.yaml deleted file mode 100644 index be568c32..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# -# Explore and test the samples controller, using samples supplied by -# the SampleDataFixture. -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - -# Confirm all the samples are there and expected requests behave. -# TODO(chdent): There's a danger here that the ordering of multiple -# samples will not be consistent. - - - name: lists samples - url: /v2/samples - response_headers: - content-type: /application/json/ - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[1].metadata.breed: pig - $[2].metadata.breed: sheep - - - name: get just one - url: /v2/samples/$RESPONSE['$[0].id'] - response_json_paths: - $.meter: livestock - $.metadata.breed: cow - - - name: list samples with limit - url: /v2/samples?limit=1 - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: list zero samples with zero limit - url: /v2/samples?limit=0 - status: 400 - - - name: list samples with query - url: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq - response_json_paths: - $[0].meter: livestock - $[0].metadata.breed: cow - $[-1].metadata.breed: cow - - - name: query by user - url: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by user_id - url: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq - response_json_paths: - $[0].user_id: $RESPONSE['$[0].user_id'] - - - name: query by project - url: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - - - name: query by project_id - url: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq - response_json_paths: - $[0].project_id: $RESPONSE['$[0].project_id'] - -# Explore failure modes for listing samples - - - name: list samples with bad field - url: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq - status: 400 - response_strings: - - timestamp - - project - - unrecognized field in query - - - name: list samples with bad metaquery field - url: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq - status: 400 - response_strings: - - unrecognized field in query - - - name: bad limit value - url: /v2/samples?limit=happiness - status: 400 - response_strings: - - Invalid input for field/attribute limit - - - name: negative limit value 400 - url: /v2/samples?limit=-99 - status: 400 - - - name: negative limit value error message - url: /v2/samples?limit=-99 - status: 400 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Limit must be positive - - - name: bad accept - desc: try an unexpected content type - url: /v2/samples - request_headers: - accept: text/plain - status: 406 - - - name: complex good accept - desc: client sends complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/json; q=0.8 - - - name: complex bad accept - desc: client sends 
complex accept do we adapt - url: /v2/samples - request_headers: - accept: text/plain, application/binary; q=0.8 - status: 406 - - - name: bad method - url: /v2/samples - method: POST - status: 405 - response_headers: - allow: GET - -# Work with just one sample. - - - name: list one of the samples - url: /v2/samples?limit=1 - - - name: retrieve one sample - url: /v2/samples/$RESPONSE['$[0].id'] - response_headers: - content-type: /application/json/ - response_json_paths: - $.meter: livestock - - - name: retrieve sample with useless query - url: /v2/samples/$RESPONSE['$.id']?limit=5 - status: 400 - response_strings: - - "Unknown argument:" - - - name: attempt missing sample - url: /v2/samples/davesnothere - status: 404 - response_headers: - content-type: /application/json/ - response_json_paths: - $.error_message.faultstring: Sample davesnothere Not Found diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml deleted file mode 100644 index 61f7c816..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Post a simple sample and confirm the created resource has -# reasonable URLs -fixtures: - - ConfigFixture - -tests: - -# POST one sample and verify its existence. - - - name: post sample for meter - desc: post a single sample - url: /v2/meters/apples?direct=True - method: POST - request_headers: - content-type: application/json - data: | - [ - { - "counter_name": "apples", - "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68", - "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff", - "counter_unit": "instance", - "counter_volume": 1, - "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36", - "resource_metadata": { - "name2": "value2", - "name1": "value1" - }, - "counter_type": "gauge" - } - ] - - response_json_paths: - $.[0].counter_name: apples - status: 201 - response_headers: - content-type: application/json; charset=UTF-8 - - - name: get resources - desc: get the resources that exist because of the sample - url: /v2/resources - response_json_paths: - $.[0].metadata.name2: value2 - - - name: get resource - desc: get just one of those resources via self - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.metadata.name2: value2 - response_strings: - - /telemetry/ diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml deleted file mode 100644 index ce4811de..00000000 --- a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# -# Explore and cover resources API with gabbi tests when there are a -# small number of pre-existing resources -# - -fixtures: - - ConfigFixture - - SampleDataFixture - -tests: - - - name: list all resources - url: /v2/resources - response_json_paths: - $[0].user_id: farmerjon - $[0].links[1].rel: livestock - response_strings: - - /telemetry/ - - - name: get one resource - desc: get a resource via the links in the first resource listed above - url: $RESPONSE['$[0].links[0].href'] - response_json_paths: - $.resource_id: $RESPONSE['$[0].resource_id'] diff --git a/ceilometer/tests/functional/publisher/test_direct.py b/ceilometer/tests/functional/publisher/test_direct.py deleted file mode 100644 index bc800e8e..00000000 --- a/ceilometer/tests/functional/publisher/test_direct.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Copyright 2015 Red Hat -# -# Licensed 
under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/direct.py -""" - -import datetime -import uuid - -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import direct -from ceilometer import sample -from ceilometer.tests import db as tests_db - - -class TestDirectPublisher(tests_db.TestBase): - - resource_id = str(uuid.uuid4()) - - test_data = [ - sample.Sample( - name='alpha', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='beta', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='gamma', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.now().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_direct_publisher(self): - """Test samples are saved.""" - self.CONF.set_override('connection', self.db_manager.url, - group='database') - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_samples(self.test_data) - - meters = list(self.conn.get_meters(resource=self.resource_id)) - names = sorted([meter.name for meter in meters]) - - self.assertEqual(3, len(meters), 'There should be 3 samples') - self.assertEqual(['alpha', 'beta', 'gamma'], names) - - -class TestEventDirectPublisher(tests_db.TestBase): - test_data = [event.Event(message_id=str(uuid.uuid4()), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5)] - - def test_direct_publisher(self): - parsed_url = netutils.urlsplit('direct://') - publisher = direct.DirectPublisher(parsed_url) - publisher.publish_events(self.test_data) - - e_types = list(self.event_conn.get_event_types()) - self.assertEqual(5, len(e_types)) - self.assertEqual(['event_%d' % i for i in range(0, 5)], - sorted(e_types)) diff --git a/ceilometer/tests/functional/storage/test_impl_hbase.py b/ceilometer/tests/functional/storage/test_impl_hbase.py deleted file mode 100644 index 16ec441a..00000000 --- a/ceilometer/tests/functional/storage/test_impl_hbase.py +++ /dev/null @@ -1,103 +0,0 @@ -# -# Copyright 2012, 2013 Dell Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/storage/impl_hbase.py - -.. note:: - In order to run the tests against real HBase server set the environment - variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before - running the tests. Make sure the Thrift server is running on that server. - -""" -import mock - - -try: - import happybase # noqa -except ImportError: - import testtools.testcase - raise testtools.testcase.TestSkipped("happybase is needed") - -from ceilometer.event.storage import impl_hbase as hbase_event -from ceilometer.storage import impl_hbase as hbase -from ceilometer.tests import base as test_base -from ceilometer.tests import db as tests_db - - -class ConnectionTest(tests_db.TestBase): - - @tests_db.run_with('hbase') - def test_hbase_connection(self): - - class TestConn(object): - def __init__(self, host, port): - self.netloc = '%s:%s' % (host, port) - - def open(self): - pass - - def get_connection_pool(conf): - return TestConn(conf['host'], conf['port']) - - with mock.patch.object(hbase.Connection, '_get_connection_pool', - side_effect=get_connection_pool): - conn = hbase.Connection('hbase://test_hbase:9090') - self.assertIsInstance(conn.conn_pool, TestConn) - - -class CapabilitiesTest(test_base.BaseTestCase): - # Check the returned capabilities list, which is specific to each DB - # driver - - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': False}}, - 'statistics': {'groupby': False, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': False, - 'min': False, - 'sum': False, - 'avg': False, - 'count': False, - 'stddev': False, - 'cardinality': False}} - }, - } - - actual_capabilities = hbase.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_event_capabilities(self): - expected_capabilities = { - 'events': {'query': {'simple': True}}, - } - - actual_capabilities = hbase_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = hbase.Connection.get_storage_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) diff --git a/ceilometer/tests/functional/storage/test_impl_log.py b/ceilometer/tests/functional/storage/test_impl_log.py deleted file mode 100644 index 2637e034..00000000 --- a/ceilometer/tests/functional/storage/test_impl_log.py +++ /dev/null @@ -1,29 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
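The removed ConnectionTest never opens a Thrift socket: mock.patch.object replaces the driver's pool factory with a side_effect callable that returns a stub. The same pattern in isolation (Driver and FakePool are stand-ins for illustration, not ceilometer code):

    import mock


    class Driver(object):
        def __init__(self, url):
            # url parsing elided; only the pool handoff matters here
            self.conn_pool = self._get_connection_pool(
                {'host': 'test_hbase', 'port': 9090})

        @staticmethod
        def _get_connection_pool(conf):
            raise RuntimeError('would open a real connection')


    class FakePool(object):
        def __init__(self, conf):
            self.netloc = '%s:%s' % (conf['host'], conf['port'])


    with mock.patch.object(Driver, '_get_connection_pool',
                           side_effect=FakePool):
        conn = Driver('hbase://test_hbase:9090')
        assert isinstance(conn.conn_pool, FakePool)
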
-"""Tests for ceilometer/storage/impl_log.py -""" -from oslotest import base - -from ceilometer.storage import impl_log - - -class ConnectionTest(base.BaseTestCase): - @staticmethod - def test_get_connection(): - conn = impl_log.Connection(None) - conn.record_metering_data({'counter_name': 'test', - 'resource_id': __name__, - 'counter_volume': 1, - }) diff --git a/ceilometer/tests/functional/storage/test_impl_mongodb.py b/ceilometer/tests/functional/storage/test_impl_mongodb.py index 87076809..3dd3ce74 100644 --- a/ceilometer/tests/functional/storage/test_impl_mongodb.py +++ b/ceilometer/tests/functional/storage/test_impl_mongodb.py @@ -21,24 +21,11 @@ """ -from ceilometer.event.storage import impl_mongodb as impl_mongodb_event -from ceilometer.storage import impl_mongodb +from ceilometer.event.storage import impl_mongodb from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db -@tests_db.run_with('mongodb') -class MongoDBConnection(tests_db.TestBase): - def test_connection_pooling(self): - test_conn = impl_mongodb.Connection(self.db_manager.url) - self.assertEqual(self.conn.conn, test_conn.conn) - - def test_replica_set(self): - url = self.db_manager._url + '?replicaSet=foobar' - conn = impl_mongodb.Connection(url) - self.assertTrue(conn.conn) - - @tests_db.run_with('mongodb') class IndexTest(tests_db.TestBase): @@ -56,10 +43,6 @@ class IndexTest(tests_db.TestBase): coll.index_information() [index_name]['expireAfterSeconds']) - def test_meter_ttl_index_absent(self): - self._test_ttl_index_absent(self.conn, 'meter', - 'metering_time_to_live') - def test_event_ttl_index_absent(self): self._test_ttl_index_absent(self.event_conn, 'event', 'event_time_to_live') @@ -77,10 +60,6 @@ class IndexTest(tests_db.TestBase): conn.upgrade() self.assertNotIn(index_name, coll.index_information()) - def test_meter_ttl_index_present(self): - self._test_ttl_index_present(self.conn, 'meter', - 'metering_time_to_live') - def test_event_ttl_index_present(self): self._test_ttl_index_present(self.event_conn, 'event', 'event_time_to_live') @@ -90,44 +69,9 @@ class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_mongodb.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } - actual_capabilities = impl_mongodb_event.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_mongodb.Connection. 
- get_storage_capabilities()) + actual_capabilities = impl_mongodb.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) diff --git a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py b/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py index 5854ba12..877b08b2 100644 --- a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py +++ b/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py @@ -19,23 +19,14 @@ """ import datetime -import warnings -import mock -from oslo_db import exception -from oslo_utils import timeutils from six.moves import reprlib from ceilometer.event.storage import impl_sqlalchemy as impl_sqla_event from ceilometer.event.storage import models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.storage import impl_sqlalchemy from ceilometer.storage.sqlalchemy import models as sql_models from ceilometer.tests import base as test_base from ceilometer.tests import db as tests_db -from ceilometer.tests.functional.storage \ - import test_storage_scenarios as scenarios @tests_db.run_with('sqlite', 'mysql', 'pgsql') @@ -47,17 +38,6 @@ class CeilometerBaseTest(tests_db.TestBase): self.assertEqual('value', base['key']) -@tests_db.run_with('sqlite') -class EngineFacadeTest(tests_db.TestBase): - - @mock.patch.object(warnings, 'warn') - def test_no_not_supported_warning(self, mocked): - impl_sqlalchemy.Connection('sqlite://') - impl_sqla_event.Connection('sqlite://') - self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning), - mocked.call_args_list) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTypeTest(tests_db.TestBase): # EventType is a construct specific to sqlalchemy @@ -118,108 +98,13 @@ class EventTest(tests_db.TestBase): self.assertTrue(reprlib.repr(ev)) -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class RelationshipTest(scenarios.DBTestBase): - # Note: Do not derive from SQLAlchemyEngineTestBase, since we - # don't want to automatically inherit all the Meter setup. 
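The removed EngineFacadeTest shows the idiom for asserting that a warning was *not* emitted: patch warnings.warn and check that no call with the offending category landed in call_args_list. In miniature, with DeprecationWarning standing in for oslo.db's NotSupportedWarning:

    import warnings

    import mock


    @mock.patch.object(warnings, 'warn')
    def check_no_warning(mocked_warn):
        # ... the code under test would run here ...
        assert (mock.call(mock.ANY, DeprecationWarning)
                not in mocked_warn.call_args_list)

    check_no_warning()
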
- - @mock.patch.object(timeutils, 'utcnow') - def test_clear_metering_data_meta_tables(self, mock_utcnow): - mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - - session = self.conn._engine_facade.get_session() - self.assertEqual(5, session.query(sql_models.Sample).count()) - - resource_ids = (session.query(sql_models.Resource.internal_id) - .group_by(sql_models.Resource.internal_id)) - meta_tables = [sql_models.MetaText, sql_models.MetaFloat, - sql_models.MetaBigInt, sql_models.MetaBool] - s = set() - for table in meta_tables: - self.assertEqual(0, (session.query(table) - .filter(~table.id.in_(resource_ids)).count() - )) - s.update(session.query(table.id).all()) - self.assertEqual(set(resource_ids.all()), s) - - class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver - def test_capabilities(self): - expected_capabilities = { - 'meters': {'query': {'simple': True, - 'metadata': True}}, - 'resources': {'query': {'simple': True, - 'metadata': True}}, - 'samples': {'query': {'simple': True, - 'metadata': True, - 'complex': True}}, - 'statistics': {'groupby': True, - 'query': {'simple': True, - 'metadata': True}, - 'aggregation': {'standard': True, - 'selectable': { - 'max': True, - 'min': True, - 'sum': True, - 'avg': True, - 'count': True, - 'stddev': True, - 'cardinality': True}} - }, - } - - actual_capabilities = impl_sqlalchemy.Connection.get_capabilities() - self.assertEqual(expected_capabilities, actual_capabilities) - def test_event_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_sqla_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) - - def test_storage_capabilities(self): - expected_capabilities = { - 'storage': {'production_ready': True}, - } - actual_capabilities = (impl_sqlalchemy. - Connection.get_storage_capabilities()) - self.assertEqual(expected_capabilities, actual_capabilities) - - -@tests_db.run_with('sqlite', 'mysql', 'pgsql') -class FilterQueryTestForMeters(scenarios.DBTestBase): - def prepare_data(self): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - user_id=None, - project_id=None, - resource_id='fake_id', - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(msg) - - def test_get_meters_by_user(self): - meters = list(self.conn.get_meters(user='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_project(self): - meters = list(self.conn.get_meters(project='None')) - self.assertEqual(1, len(meters)) diff --git a/ceilometer/tests/functional/storage/test_pymongo_base.py b/ceilometer/tests/functional/storage/test_pymongo_base.py deleted file mode 100644 index 6dadffad..00000000 --- a/ceilometer/tests/functional/storage/test_pymongo_base.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests the mongodb functionality -""" - -import copy -import datetime - -import mock - -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer.tests import db as tests_db -from ceilometer.tests.functional.storage import test_storage_scenarios - - -@tests_db.run_with('mongodb') -class CompatibilityTest(test_storage_scenarios.DBTestBase): - - def prepare_data(self): - def old_record_metering_data(self, data): - received_timestamp = datetime.datetime.utcnow() - self.db.resource.update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - # Current metadata being used and when it was - # last updated. - 'timestamp': data['timestamp'], - 'received_timestamp': received_timestamp, - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - }, - }, - }, - upsert=True, - ) - - record = copy.copy(data) - self.db.meter.insert(record) - - # Stubout with the old version DB schema, the one w/o 'counter_unit' - with mock.patch.object(self.conn, 'record_metering_data', - side_effect=old_record_metering_data): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(self.conn, msg) - - def test_counter_unit(self): - meters = list(self.conn.get_meters()) - self.assertEqual(1, len(meters)) - - -@tests_db.run_with('mongodb') -class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase): - def prepare_data(self): - def old_record_metering_data(self, data): - received_timestamp = datetime.datetime.utcnow() - self.db.resource.update( - {'_id': data['resource_id']}, - {'$set': {'project_id': data['project_id'], - 'user_id': data['user_id'], - # Current metadata being used and when it was - # last updated. 
- 'timestamp': data['timestamp'], - 'received_timestamp': received_timestamp, - 'metadata': data['resource_metadata'], - 'source': data['source'], - }, - '$addToSet': {'meter': {'counter_name': data['counter_name'], - 'counter_type': data['counter_type'], - }, - }, - }, - upsert=True, - ) - - record = copy.copy(data) - self.db.meter.insert(record) - - # Stubout with the old version DB schema, the one w/o 'counter_unit' - with mock.patch.object(self.conn, 'record_metering_data', - side_effect=old_record_metering_data): - self.counters = [] - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5, - None, - None, - None, - timestamp=datetime.datetime(2012, 9, 25, 10, 30), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - - self.counters.append(c) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret') - self.conn.record_metering_data(self.conn, msg) - - def test_get_meters_by_user(self): - meters = list(self.conn.get_meters(user='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_resource(self): - meters = list(self.conn.get_meters(resource='None')) - self.assertEqual(1, len(meters)) - - def test_get_meters_by_project(self): - meters = list(self.conn.get_meters(project='None')) - self.assertEqual(1, len(meters)) diff --git a/ceilometer/tests/functional/storage/test_storage_scenarios.py b/ceilometer/tests/functional/storage/test_storage_scenarios.py index 2f78b100..bedf4837 100644 --- a/ceilometer/tests/functional/storage/test_storage_scenarios.py +++ b/ceilometer/tests/functional/storage/test_storage_scenarios.py @@ -19,2622 +19,13 @@ import datetime import operator import mock -from oslo_config import cfg -from oslo_db import api -from oslo_db import exception as dbexc from oslo_utils import timeutils -import pymongo -import ceilometer from ceilometer.event import storage as event_storage from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils -from ceilometer import sample -from ceilometer import storage from ceilometer.tests import db as tests_db -class DBTestBase(tests_db.TestBase): - @staticmethod - def create_side_effect(method, exception_type, test_exception): - def side_effect(*args, **kwargs): - if test_exception.pop(): - raise exception_type - else: - return method(*args, **kwargs) - return side_effect - - def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(), - metadata=None, - name='instance', - sample_type=sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user-id', - project_id='project-id', - resource_id='resource-id', source=None): - metadata = metadata or {'display_name': 'test-server', - 'tag': 'self.counter'} - s = sample.Sample( - name, sample_type, unit=unit, volume=volume, user_id=user_id, - project_id=project_id, resource_id=resource_id, - timestamp=timestamp, - resource_metadata=metadata, source=source - ) - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - self.conn.record_metering_data(msg) - return msg - - def setUp(self): - super(DBTestBase, self).setUp() - patcher = mock.patch.object(timeutils, 'utcnow') - self.addCleanup(patcher.stop) - self.mock_utcnow = patcher.start() - self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) - self.prepare_data() - - def prepare_data(self): - original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41), - (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42), - (2012, 7, 2, 10, 43)] - - 
timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44), - (2011, 5, 30, 18, 3), - (2012, 12, 1, 1, 25), - (2012, 2, 29, 6, 59), - (2013, 5, 31, 23, 7)] - timestamp_list = (original_timestamps + - timestamps_for_test_samples_default_order) - - self.msgs = [] - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[0]), - source='test-1') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[1]), - resource_id='resource-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter2'}, - source='test-2') - ) - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp_list[2]), - resource_id='resource-id-alternate', - user_id='user-id-alternate', - metadata={'display_name': 'test-server', 'tag': 'self.counter3'}, - source='test-3') - ) - - start_idx = 3 - end_idx = len(timestamp_list) - - for i, ts in zip(range(start_idx - 1, end_idx - 1), - timestamp_list[start_idx:end_idx]): - self.msgs.append( - self.create_and_store_sample( - timestamp=datetime.datetime(*ts), - user_id='user-id-%s' % i, - project_id='project-id-%s' % i, - resource_id='resource-id-%s' % i, - metadata={ - 'display_name': 'test-server', - 'tag': 'counter-%s' % i - }, - source='test') - ) - - -class ResourceTest(DBTestBase): - def prepare_data(self): - super(ResourceTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_resources(self): - expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39) - expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40) - msgs_sources = [msg['source'] for msg in self.msgs] - resources = list(self.conn.get_resources()) - self.assertEqual(10, len(resources)) - for resource in resources: - if resource.resource_id != 'resource-id': - continue - self.assertEqual(expected_first_sample_timestamp, - resource.first_sample_timestamp) - self.assertEqual(expected_last_sample_timestamp, - resource.last_sample_timestamp) - self.assertEqual('resource-id', resource.resource_id) - self.assertEqual('project-id', resource.project_id) - self.assertIn(resource.source, msgs_sources) - self.assertEqual('user-id', resource.user_id) - self.assertEqual('test-server', resource.metadata['display_name']) - break - else: - self.fail('Never found resource-id') - - def test_get_resources_start_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4', - 'resource-id-6', 'resource-id-8']) - - resources = list(self.conn.get_resources(start_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='ge')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=timestamp, - start_timestamp_op='gt')) - resource_ids = [r.resource_id for r in resources] - expected.remove('resource-id-2') - 
self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_end_timestamp(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 42) - expected = set(['resource-id', 'resource-id-alternate', - 'resource-id-5', 'resource-id-7', - 'resource-id-mongo_bad_key']) - - resources = list(self.conn.get_resources(end_timestamp=timestamp)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(expected, set(resource_ids)) - - resources = list(self.conn.get_resources(end_timestamp=timestamp, - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - expected.add('resource-id-2') - self.assertEqual(expected, set(resource_ids)) - - def test_get_resources_both_timestamps(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts)) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='lt')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(0, len(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='gt', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-3']), set(resource_ids)) - - resources = list(self.conn.get_resources(start_timestamp=start_ts, - end_timestamp=end_ts, - start_timestamp_op='ge', - end_timestamp_op='le')) - resource_ids = [r.resource_id for r in resources] - self.assertEqual(set(['resource-id-2', 'resource-id-3']), - set(resource_ids)) - - def test_get_resources_by_source(self): - resources = list(self.conn.get_resources(source='test-1')) - self.assertEqual(1, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id']), ids) - - def test_get_resources_by_user(self): - resources = list(self.conn.get_resources(user='user-id')) - self.assertTrue(len(resources) == 2 or len(resources) == 1) - ids = set(r.resource_id for r in resources) - # tolerate storage driver only reporting latest owner of resource - resources_ever_owned_by = set(['resource-id', - 'resource-id-alternate']) - resources_now_owned_by = set(['resource-id']) - self.assertTrue(ids == resources_ever_owned_by or - ids == resources_now_owned_by, - 'unexpected resources: %s' % ids) - - def test_get_resources_by_alternate_user(self): - resources = list(self.conn.get_resources(user='user-id-alternate')) - self.assertEqual(1, len(resources)) - # only a single resource owned by this user ever - self.assertEqual('resource-id-alternate', resources[0].resource_id) - - def test_get_resources_by_project(self): - resources = list(self.conn.get_resources(project='project-id')) - self.assertEqual(2, len(resources)) - ids = set(r.resource_id for r in resources) - self.assertEqual(set(['resource-id', 
'resource-id-alternate']), ids) - - def test_get_resources_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(9, len(resources)) - - def test_get_resources_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.$name_2': 'test-server2', - 'metadata.display.name.name.$1': 'test-server1'} - resources = list(self.conn.get_resources(metaquery=q)) - self.assertEqual(1, len(resources)) - - def test_get_resources_by_empty_metaquery(self): - resources = list(self.conn.get_resources(metaquery={})) - self.assertEqual(10, len(resources)) - - def test_get_resources_most_recent_metadata_all(self): - resources = self.conn.get_resources() - expected_tags = ['self.counter', 'self.counter3', 'counter-2', - 'counter-3', 'counter-4', 'counter-5', 'counter-6', - 'counter-7', 'counter-8'] - - for resource in resources: - self.assertIn(resource.metadata['tag'], expected_tags) - - def test_get_resources_most_recent_metadata_single(self): - resource = list( - self.conn.get_resources(resource='resource-id-alternate') - )[0] - expected_tag = 'self.counter3' - self.assertEqual(expected_tag, resource.metadata['tag']) - - -class ResourceTestOrdering(DBTestBase): - def prepare_data(self): - sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)]), - ('resource-id-2', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 42), - (2013, 8, 10, 10, 48), - (2013, 8, 10, 10, 47)]), - ('resource-id-3', [(2013, 8, 10, 10, 43), - (2013, 8, 10, 10, 44), - (2013, 8, 10, 10, 50), - (2013, 8, 10, 10, 49), - (2013, 8, 10, 10, 47)])] - - counter = 0 - for resource, timestamps in sample_timings: - for timestamp in timestamps: - self.create_and_store_sample( - timestamp=datetime.datetime(*timestamp), - resource_id=resource, - user_id=str(counter % 2), - project_id=str(counter % 3), - metadata={ - 'display_name': 'test-server', - 'tag': 'sample-%s' % counter - }, - source='test' - ) - counter += 1 - - def test_get_resources_ordering_all(self): - resources = list(self.conn.get_resources()) - expected = set([ - ('resource-id-1', 'sample-3'), - ('resource-id-2', 'sample-8'), - ('resource-id-3', 'sample-12') - ]) - received = set([(r.resource_id, r.metadata['tag']) for r in resources]) - self.assertEqual(expected, received) - - def test_get_resources_ordering_single(self): - resource = list(self.conn.get_resources(resource='resource-id-2'))[0] - self.assertEqual('resource-id-2', resource.resource_id) - self.assertEqual('sample-8', resource.metadata['tag']) - - -class MeterTest(DBTestBase): - def test_get_meters(self): - msgs_sources = [msg['source'] for msg in self.msgs] - results = list(self.conn.get_meters()) - self.assertEqual(9, len(results)) - for meter in results: - self.assertIn(meter.source, msgs_sources) - - def test_get_meters_by_user(self): - results = list(self.conn.get_meters(user='user-id')) - self.assertEqual(1, len(results)) - - def test_get_meters_by_project(self): - results = list(self.conn.get_meters(project='project-id')) - self.assertEqual(2, len(results)) - - def test_get_meters_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - results = list(self.conn.get_meters(metaquery=q)) - self.assertIsNotEmpty(results) - self.assertEqual(9, len(results)) - - def test_get_meters_by_empty_metaquery(self): - results = list(self.conn.get_meters(metaquery={})) - self.assertEqual(9, 
len(results)) - - -class RawSampleTest(DBTestBase): - - def prepare_data(self): - super(RawSampleTest, self).prepare_data() - - self.msgs.append(self.create_and_store_sample( - timestamp=datetime.datetime(2012, 7, 2, 10, 39), - user_id='mongodb_test', - resource_id='resource-id-mongo_bad_key', - project_id='project-id-test', - metadata={'display.name': {'name.$1': 'test-server1', - '$name_2': 'test-server2'}, - 'tag': 'self.counter'}, - source='test-4' - )) - - def test_get_sample_counter_volume(self): - # NOTE(idegtiarov) Because wsme expected a float type of data this test - # checks type of counter_volume received from database. - f = storage.SampleFilter() - result = next(self.conn.get_samples(f, limit=1)) - self.assertIsInstance(result.counter_volume, float) - - def test_get_samples_limit_zero(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=0)) - self.assertEqual(0, len(results)) - - def test_get_samples_limit(self): - f = storage.SampleFilter() - results = list(self.conn.get_samples(f, limit=3)) - self.assertEqual(3, len(results)) - for result in results: - self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at) - - def test_get_samples_in_default_order(self): - f = storage.SampleFilter() - prev_timestamp = None - for sample_item in self.conn.get_samples(f): - if prev_timestamp is not None: - self.assertTrue(prev_timestamp >= sample_item.timestamp) - prev_timestamp = sample_item.timestamp - - def test_get_samples_by_user(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(3, len(results)) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:3]) - - def test_get_samples_by_user_limit(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=1)) - self.assertEqual(1, len(results)) - - def test_get_samples_by_user_limit_bigger(self): - f = storage.SampleFilter(user='user-id') - results = list(self.conn.get_samples(f, limit=42)) - self.assertEqual(3, len(results)) - - def test_get_samples_by_project(self): - f = storage.SampleFilter(project='project-id') - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs[:4]) - - def test_get_samples_by_resource(self): - f = storage.SampleFilter(user='user-id', resource='resource-id') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - d = results[1].as_dict() - self.assertEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertEqual(self.msgs[0], d) - - def test_get_samples_by_metaquery(self): - q = {'metadata.display_name': 'test-server'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - for meter in results: - d = meter.as_dict() - self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at']) - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_get_samples_by_metaquery_key_with_dot_in_metadata(self): - q = {'metadata.display.name.name.$1': 'test-server1', - 'metadata.display.name.$name_2': 'test-server2'} - f = storage.SampleFilter(metaquery=q) - results = list(self.conn.get_samples(f)) - self.assertIsNotNone(results) - self.assertEqual(1, len(results)) - - def 
test_get_samples_by_start_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 41) - f = storage.SampleFilter( - user='user-id', - start_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'ge' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(timestamp, results[0].timestamp) - - f.start_timestamp_op = 'gt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - def test_get_samples_by_end_time(self): - timestamp = datetime.datetime(2012, 7, 2, 10, 40) - f = storage.SampleFilter( - user='user-id', - end_timestamp=timestamp, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39), - results[1].timestamp) - - def test_get_samples_by_both_times(self): - start_ts = datetime.datetime(2012, 7, 2, 10, 42) - end_ts = datetime.datetime(2012, 7, 2, 10, 43) - f = storage.SampleFilter( - start_timestamp=start_ts, - end_timestamp=end_ts, - ) - - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(0, len(results)) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'lt' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(start_ts, results[0].timestamp) - - f.start_timestamp_op = 'gt' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(1, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - - f.start_timestamp_op = 'ge' - f.end_timestamp_op = 'le' - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - self.assertEqual(end_ts, results[0].timestamp) - self.assertEqual(start_ts, results[1].timestamp) - - def test_get_samples_by_name(self): - f = storage.SampleFilter(user='user-id', meter='no-such-meter') - results = list(self.conn.get_samples(f)) - self.assertIsEmpty(results) - - def test_get_samples_by_name2(self): - f = storage.SampleFilter(user='user-id', meter='instance') - results = list(self.conn.get_samples(f)) - self.assertIsNotEmpty(results) - - def test_get_samples_by_source(self): - f = storage.SampleFilter(source='test-1') - results = list(self.conn.get_samples(f)) - self.assertEqual(2, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_clear_metering_data(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. 
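The timestamp tests above walk the comparison operators SampleFilter accepts: the filter is built once and the *_op attributes are flipped between queries. The shape of the calls, mirroring the deleted tests' use of ceilometer.storage:

    import datetime

    from ceilometer import storage

    f = storage.SampleFilter(
        start_timestamp=datetime.datetime(2012, 7, 2, 10, 42),
        end_timestamp=datetime.datetime(2012, 7, 2, 10, 43),
    )
    f.start_timestamp_op = 'ge'  # inclusive lower bound
    f.end_timestamp_op = 'le'    # inclusive upper bound
    # conn.get_samples(f) now matches both boundary samples, as
    # test_get_samples_by_both_times asserts above.
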
- - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(5, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(5, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') - def test_clear_metering_data_no_data_to_remove(self): - # NOTE(jd) Override this test in MongoDB because our code doesn't clear - # the collections, this is handled by MongoDB TTL feature. - - self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(3 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(12, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(10, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_clear_metering_data_expire_samples_only(self): - - cfg.CONF.set_override('sql_expire_samples_only', True) - self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) - self.conn.clear_expired_metering_data(4 * 60) - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(7, len(results)) - results = list(self.conn.get_resources()) - self.assertEqual(6, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_success_on_deadlock(self): - raise_deadlock = [False, True] - self.CONF.set_override('max_retries', 2, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - self.conn.record_metering_data(msg) - self.assertEqual(1, retry_sleep.call_count) - - f = storage.SampleFilter(meter='instance') - results = list(self.conn.get_samples(f)) - self.assertEqual(13, len(results)) - - @tests_db.run_with('sqlite', 'mysql', 'pgsql') - def test_record_metering_data_retry_failure_on_deadlock(self): - raise_deadlock = [True, True, True] - self.CONF.set_override('max_retries', 3, group='database') - - s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='', - volume=1, user_id='user_id', - project_id='project_id', - resource_id='resource_id', - timestamp=datetime.datetime.utcnow(), - resource_metadata={'display_name': 'test-server', - 'tag': 'self.counter'}, - source=None) - - msg = utils.meter_message_from_counter( - s, self.CONF.publisher.telemetry_secret - ) - - mock_resource_create = mock.patch.object(self.conn, "_create_resource") - - mock_resource_create.side_effect = self.create_side_effect( - self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock) - with mock.patch.object(api.time, 'sleep') as retry_sleep: - try: - self.conn.record_metering_data(msg) - except dbexc.DBError as err: - self.assertIn('DBDeadlock', str(type(err))) - self.assertEqual(3, retry_sleep.call_count) - - -class ComplexSampleQueryTest(DBTestBase): - def setUp(self): - 
super(ComplexSampleQueryTest, self).setUp() - self.complex_filter = { - "and": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}]}]}]} - or_expression = [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-43"}}, - {"=": {"resource_id": "resource-id-44"}}] - and_expression = [{">": {"counter_volume": 0.4}}, - {"not": {">": {"counter_volume": 0.8}}}] - self.complex_filter_list = {"and": - [{"or": or_expression}, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - in_expression = {"in": {"resource_id": ["resource-id-42", - "resource-id-43", - "resource-id-44"]}} - self.complex_filter_in = {"and": - [in_expression, - {"and": - [{"=": {"counter_name": "cpu_util"}}, - {"and": and_expression}]}]} - - def _create_samples(self): - for resource in range(42, 45): - for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]: - metadata = {'a_string_key': "meta-value" + str(volume), - 'a_float_key': volume, - 'an_int_key': resource, - 'a_bool_key': (resource == 43)} - - self.create_and_store_sample(resource_id="resource-id-%s" - % resource, - metadata=metadata, - name="cpu_util", - volume=volume) - - def test_no_filter(self): - results = list(self.conn.query_samples()) - self.assertEqual(len(self.msgs), len(results)) - for sample_item in results: - d = sample_item.as_dict() - del d['recorded_at'] - self.assertIn(d, self.msgs) - - def test_query_complex_filter_with_regexp(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_id": "resource-id.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - - def test_query_complex_filter_with_regexp_metadata(self): - self._create_samples() - complex_regex_filter = {"and": [ - {"=~": {"resource_metadata.a_string_key": "meta-value.*"}}, - {"=": {"counter_volume": 0.4}}]} - results = list( - self.conn.query_samples(filter_expr=complex_regex_filter)) - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.4", - sample_item.resource_metadata['a_string_key']) - - def test_no_filter_with_zero_limit(self): - limit = 0 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_no_filter_with_limit(self): - limit = 3 - results = list(self.conn.query_samples(limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_simple_filter(self): - simple_filter = {"=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(1, len(results)) - for sample_item in results: - self.assertEqual("resource-id-8", sample_item.resource_id) - - def test_query_simple_filter_with_not_equal_relation(self): - simple_filter = {"!=": {"resource_id": "resource-id-8"}} - results = list(self.conn.query_samples(filter_expr=simple_filter)) - self.assertEqual(len(self.msgs) - 1, len(results)) - for sample_item in results: - self.assertNotEqual("resource-id-8", sample_item.resource_id) - - def test_query_complex_filter(self): - self._create_samples() - results = list(self.conn.query_samples(filter_expr=( - self.complex_filter))) - 
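# The filter_expr trees built in setUp are plain nested dicts: boolean
# nodes ("and", "or", "not") over leaf comparisons ("=", "!=", "<", "<=",
# ">", ">=", "in") on sample fields, with dotted "resource_metadata.x"
# keys reaching into the metadata dict. The backends compile such trees to
# SQL or MongoDB queries; this in-memory evaluator is only a sketch of the
# intended semantics (the "=~" regexp operator is omitted):
import operator

_OPS = {'=': operator.eq, '!=': operator.ne, '<': operator.lt,
        '<=': operator.le, '>': operator.gt, '>=': operator.ge,
        'in': lambda value, choices: value in choices}

def matches(expr, sample_dict):
    op, arg = next(iter(expr.items()))
    if op == 'and':
        return all(matches(sub, sample_dict) for sub in arg)
    if op == 'or':
        return any(matches(sub, sample_dict) for sub in arg)
    if op == 'not':
        return not matches(arg, sample_dict)
    field, expected = next(iter(arg.items()))
    if field.startswith('resource_metadata.'):
        value = sample_dict.get('resource_metadata', {}).get(
            field[len('resource_metadata.'):])
    else:
        value = sample_dict.get(field)
    if value is None:
        # a missing field never satisfies an ordering comparison
        return op == '!=' and expected is not None
    return _OPS[op](value, expected)

# e.g. matches(self.complex_filter, s) holds exactly for cpu_util samples
# of resource-id-42/44 with 0.4 < counter_volume <= 0.8. orderby, when
# given, is a list of single-key dicts applied in order, e.g.
# [{"counter_volume": "asc"}, {"resource_id": "desc"}]; an unknown
# direction string raises KeyError, as the wrong_order_in_orderby test
# below asserts.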
self.assertEqual(6, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_limit(self): - self._create_samples() - limit = 3 - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - orderby = [{"counter_volume": "asc"}] - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filter_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42", - "resource-id-44", "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list(self.conn.query_samples(filter_expr=self.complex_filter, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_list(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_complex_filter_with_list_with_limit(self): - self._create_samples() - limit = 3 - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - limit=limit)) - self.assertEqual(limit, len(results)) - - def test_query_complex_filter_with_list_with_simple_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - orderby = [{"counter_volume": "asc"}] - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - - def test_query_complex_filterwith_list_with_complex_orderby(self): - self._create_samples() - expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79, - 0.79, 0.8, 0.8, 0.8] - expected_resource_id_order = ["resource-id-44", "resource-id-43", - "resource-id-42", "resource-id-44", - "resource-id-43", "resource-id-42", - "resource-id-44", "resource-id-43", - "resource-id-42"] - - orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}] - - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_list, - orderby=orderby)) - - self.assertEqual(expected_volume_order, - [s.counter_volume for s in results]) - self.assertEqual(expected_resource_id_order, - [s.resource_id for s in results]) - - def test_query_complex_filter_with_wrong_order_in_orderby(self): - self._create_samples() - - orderby = [{"counter_volume": "not valid order"}, - {"resource_id": "desc"}] - - query = lambda: 
list(self.conn.query_samples(filter_expr=( - self.complex_filter), - orderby=orderby)) - self.assertRaises(KeyError, query) - - def test_query_complex_filter_with_in(self): - self._create_samples() - results = list( - self.conn.query_samples(filter_expr=self.complex_filter_in)) - self.assertEqual(9, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_id, - set(["resource-id-42", - "resource-id-43", - "resource-id-44"])) - self.assertEqual("cpu_util", sample_item.counter_name) - self.assertTrue(sample_item.counter_volume > 0.4) - self.assertTrue(sample_item.counter_volume <= 0.8) - - def test_query_simple_metadata_filter(self): - self._create_samples() - - filter_expr = {"=": {"resource_metadata.a_bool_key": True}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertTrue(sample_item.resource_metadata["a_bool_key"]) - - def test_query_simple_metadata_with_in_op(self): - self._create_samples() - - filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(12, len(results)) - for sample_item in results: - self.assertIn(sample_item.resource_metadata["an_int_key"], - [42, 43]) - - def test_query_complex_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(8, len(results)) - for sample_item in results: - self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertTrue(sample_item.resource_metadata["an_int_key"] > 42) - - def test_query_mixed_data_and_metadata_filter(self): - self._create_samples() - subfilter = {"or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.a_float_key": 0.41}}]} - - filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}}, - subfilter]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertTrue((sample_item.resource_metadata["a_string_key"] == - "meta-value0.81" or - sample_item.resource_metadata["a_float_key"] <= - 0.41)) - self.assertEqual("resource-id-42", sample_item.resource_id) - - def test_query_non_existing_metadata_with_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.a_string_key": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("meta-value0.81", - sample_item.resource_metadata["a_string_key"]) - - def test_query_non_existing_metadata_without_result(self): - self._create_samples() - - filter_expr = { - "or": [{"=": {"resource_metadata.key_not_exists": - "meta-value0.81"}}, - {"<=": {"resource_metadata.key_not_exists": 0.41}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - self.assertEqual(0, len(results)) - - def test_query_negated_metadata(self): - self._create_samples() - - filter_expr = { - "and": [{"=": {"resource_id": "resource-id-42"}}, - 
{"not": {"or": [{">": {"resource_metadata.an_int_key": - 43}}, - {"<=": {"resource_metadata.a_float_key": - 0.41}}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertEqual("resource-id-42", sample_item.resource_id) - self.assertTrue(sample_item.resource_metadata["an_int_key"] <= 43) - self.assertTrue(sample_item.resource_metadata["a_float_key"] > - 0.41) - - def test_query_negated_complex_expression(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": - [{">": {"counter_volume": 0.4}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_with_double_negation(self): - self._create_samples() - filter_expr = { - "and": - [{"=": {"counter_name": "cpu_util"}}, - {"not": - {"or": - [{"or": - [{"=": {"resource_id": "resource-id-42"}}, - {"=": {"resource_id": "resource-id-44"}}]}, - {"and": [{"not": {"<=": {"counter_volume": 0.4}}}, - {"<": {"counter_volume": 0.8}}]}]}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(4, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81]) - self.assertEqual("cpu_util", sample_item.counter_name) - - def test_query_negate_not_equal(self): - self._create_samples() - filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(6, len(results)) - for sample_item in results: - self.assertEqual("resource-id-43", sample_item.resource_id) - - def test_query_negated_in_op(self): - self._create_samples() - filter_expr = { - "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}}, - {"=": {"resource_id": "resource-id-42"}}]} - - results = list(self.conn.query_samples(filter_expr=filter_expr)) - - self.assertEqual(3, len(results)) - for sample_item in results: - self.assertIn(sample_item.counter_volume, - [0.41, 0.8, 0.81]) - - -class StatisticsTest(DBTestBase): - def prepare_data(self): - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 5 + i, - 'user-id', - 'project1', - 'resource-id', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'volume.size', - 'gauge', - 'GiB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={'display_name': 'test-volume', - 'tag': 'self.counter', - }, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - for i in range(3): - c = sample.Sample( - 'memory', - 'gauge', - 'MB', - 8 + i, - 'user-5', - 'project2', - 'resource-6', - 
timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i), - resource_metadata={}, - source='test', - ) - msg = utils.meter_message_from_counter( - c, - secret='not-so-secret', - ) - self.conn.record_metering_data(msg) - - def test_by_meter(self): - f = storage.SampleFilter( - meter='memory' - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('MB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - results.period_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32), - results.period_end) - - def test_by_user(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(8, results.min) - self.assertEqual(10, results.max) - self.assertEqual(27, results.sum) - self.assertEqual(9, results.avg) - - def test_no_period_in_query(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.period) - - def test_period_is_int(self): - f = storage.SampleFilter( - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertIs(int, type(results.period)) - self.assertEqual(6, results.count) - - def test_by_user_period(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - set(r.period_end for r in results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(2, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8.5, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(9, r.max) - self.assertEqual(17, r.sum) - self.assertEqual(7200, r.period) - self.assertIsInstance(r.period, int) - expected_end = r.period_start + datetime.timedelta(seconds=7200) - self.assertEqual(expected_end, r.period_end) - self.assertEqual(3660, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31), - r.duration_end) - - def test_by_user_period_with_timezone(self): - dates = [ - '2012-09-25T00:28:00-10:00', - '2012-09-25T01:28:00-09:00', - '2012-09-25T02:28:00-08:00', - '2012-09-25T03:28:00-07:00', - '2012-09-25T04:28:00-06:00', - '2012-09-25T05:28:00-05:00', - '2012-09-25T06:28:00-04:00', - '2012-09-25T07:28:00-03:00', - '2012-09-25T08:28:00-02:00', - '2012-09-25T09:28:00-01:00', - '2012-09-25T10:28:00Z', - '2012-09-25T11:28:00+01:00', - '2012-09-25T12:28:00+02:00', - '2012-09-25T13:28:00+03:00', - '2012-09-25T14:28:00+04:00', - '2012-09-25T15:28:00+05:00', - 
'2012-09-25T16:28:00+06:00', - '2012-09-25T17:28:00+07:00', - '2012-09-25T18:28:00+08:00', - '2012-09-25T19:28:00+09:00', - '2012-09-25T20:28:00+10:00', - '2012-09-25T21:28:00+11:00', - '2012-09-25T22:28:00+12:00', - ] - for date in dates: - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp=date - ) - results = list(self.conn.get_meter_statistics(f, period=7200)) - self.assertEqual(2, len(results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28), - datetime.datetime(2012, 9, 25, 12, 28)]), - set(r.period_start for r in results)) - self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28), - datetime.datetime(2012, 9, 25, 14, 28)]), - set(r.period_end for r in results)) - - def test_by_user_period_start_end(self): - f = storage.SampleFilter( - user='user-5', - meter='volume.size', - start_timestamp='2012-09-25T10:28:00', - end_timestamp='2012-09-25T11:28:00', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual(1, len(results)) - r = results[0] - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28), - r.period_start) - self.assertEqual(1, r.count) - self.assertEqual('GiB', r.unit) - self.assertEqual(8, r.avg) - self.assertEqual(8, r.min) - self.assertEqual(8, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(1800, r.period) - self.assertEqual(r.period_start + datetime.timedelta(seconds=1800), - r.period_end) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_start) - self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30), - r.duration_end) - - def test_by_project(self): - f = storage.SampleFilter( - meter='volume.size', - resource='resource-id', - start_timestamp='2012-09-25T11:30:00', - end_timestamp='2012-09-25T11:32:00', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual(0, results.duration) - self.assertEqual(1, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(6, results.min) - self.assertEqual(6, results.max) - self.assertEqual(6, results.sum) - self.assertEqual(6, results.avg) - - def test_one_resource(self): - f = storage.SampleFilter( - user='user-id', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f))[0] - self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32) - - datetime.datetime(2012, 9, 25, 10, 30)).seconds, - results.duration) - self.assertEqual(3, results.count) - self.assertEqual('GiB', results.unit) - self.assertEqual(5, results.min) - self.assertEqual(7, results.max) - self.assertEqual(18, results.sum) - self.assertEqual(6, results.avg) - - def test_with_no_sample(self): - f = storage.SampleFilter( - user='user-not-exists', - meter='volume.size', - ) - results = list(self.conn.get_meter_statistics(f, period=1800)) - self.assertEqual([], results) - - -class StatisticsGroupByTest(DBTestBase): - def prepare_data(self): - test_sample_data = ( - {'volume': 2, 'user': 'user-1', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '84'}, - {'volume': 2, 'user': 'user-1', 'project': 'project-2', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', - 'source': 'source-2', 'metadata_instance_type': '83'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), - 
'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 1, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 2, 'user': 'user-2', 'project': 'project-1', - 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '84'}, - {'volume': 4, 'user': 'user-2', 'project': 'project-2', - 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), - 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', - 'source': 'source-1', 'metadata_instance_type': '82'}, - {'volume': 4, 'user': 'user-3', 'project': 'project-1', - 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), - 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', - 'source': 'source-3', 'metadata_instance_type': '83'}, - ) - - for test_sample in test_sample_data: - c = sample.Sample( - 'instance', - sample.TYPE_CUMULATIVE, - unit='s', - volume=test_sample['volume'], - user_id=test_sample['user'], - project_id=test_sample['project'], - resource_id=test_sample['resource'], - timestamp=datetime.datetime(*test_sample['timestamp']), - resource_metadata={'flavor': test_sample['metadata_flavor'], - 'event': test_sample['metadata_event'], - 'instance_type': - test_sample['metadata_instance_type']}, - source=test_sample['source'], - ) - msg = utils.meter_message_from_counter( - c, self.CONF.publisher.telemetry_secret, - ) - self.conn.record_metering_data(msg) - - def test_group_by_user(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_resource(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - for r in results: - if r.groupby == 
{'resource_id': 'resource-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_project(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_source(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, groupby=['source'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['source']), groupby_keys_set) - self.assertEqual(set(['source-1', 'source-2', 'source-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'source': 'source-1'}: - self.assertEqual(4, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(8, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'source': 'source-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_unknown_field(self): - f = storage.SampleFilter( - meter='instance', - ) - # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list - # whereas the SQLAlchemy get_meter_statistics() returns a generator. - # You have to apply list() to the SQLAlchemy generator to get it to - # throw an error. The MongoDB get_meter_statistics() will throw an - # error before list() is called. By using lambda, we can cover both - # MongoDB and SQLAlchemy in a single test. 
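# Concretely: a generator body does not run until it is iterated, so the
# error for an unsupported groupby field surfaces only inside list(). A
# tiny standalone illustration (the builtin NotImplementedError stands in
# for ceilometer.NotImplementedError):
def stats():
    raise NotImplementedError('unsupported groupby field')
    yield  # unreachable; its presence makes stats() a generator

gen = stats()   # nothing raised yet -- the body has not started
# list(gen)     # this is the call that finally raises, hence the lambda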
- self.assertRaises( - ceilometer.NotImplementedError, - lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf'])) - ) - - def test_group_by_metadata(self): - # This test checks grouping by a single metadata field - # (now only resource_metadata.instance_type is available). - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '83'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '84'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_multiple_regular(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['user_id', - 'resource_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1', - 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_id': 'resource-2'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-2'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-1', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_id': 'resource-3'}, - r.groupby) - self.assertNotEqual({'user_id': 
'user-3', - 'resource_id': 'resource-1'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_id': 'resource-2'}, - r.groupby, ) - - def test_group_by_multiple_metadata(self): - # TODO(terriyu): test_group_by_multiple_metadata needs to be - # implemented. - # This test should check grouping by multiple metadata fields. - pass - - def test_group_by_multiple_regular_metadata(self): - # This test checks grouping by a combination of regular and - # metadata fields. - f = storage.SampleFilter( - meter='instance', - ) - results = list( - self.conn.get_meter_statistics( - f, groupby=['user_id', 'resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['user_id', 'resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['user-1', 'user-2', 'user-3', '82', - '83', '84']), - groupby_vals_set) - - for r in results: - if r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-1', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '82'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-2', - 'resource_metadata.instance_type': '84'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'user_id': 'user-3', - 'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - self.assertNotEqual({'user_id': 'user-1', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-2', - 'resource_metadata.instance_type': '83'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '82'}, - r.groupby) - self.assertNotEqual({'user_id': 'user-3', - 'resource_metadata.instance_type': '84'}, - r.groupby) - - def test_group_by_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']), - groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - 
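# What every groupby assertion in this class encodes, reduced to pure
# Python: bucket the samples by the requested keys, then aggregate each
# bucket. A model of the result shape only -- the real backends aggregate
# inside the database, not in Python:
import collections

def group_stats(samples, keys):
    # samples: dicts carrying the groupby keys plus a numeric 'volume'
    buckets = collections.defaultdict(list)
    for s in samples:
        buckets[tuple((k, s[k]) for k in keys)].append(s['volume'])
    for group, volumes in sorted(buckets.items()):
        yield {'groupby': dict(group), 'count': len(volumes),
               'min': min(volumes), 'max': max(volumes),
               'sum': sum(volumes),
               'avg': sum(volumes) / float(len(volumes))}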
self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_metadata_with_query_filter(self): - # This test checks grouping by a metadata field in combination - # with a query filter. - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), - groupby_vals_set) - - for r in results: - if r.groupby == {'resource_metadata.instance_type': '82'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '83'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - elif r.groupby == {'resource_metadata.instance_type': '84'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_with_query_filter_multiple(self): - f = storage.SampleFilter( - meter='instance', - user='user-2', - source='source-1', - ) - results = list(self.conn.get_meter_statistics( - f, - groupby=['project_id', 'resource_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2', - 'resource-1', 'resource-2']), - groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1', - 'resource_id': 'resource-1'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-1', - 'resource_id': 'resource-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - elif r.groupby == {'project_id': 'project-2', - 'resource_id': 'resource-2'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - else: - 
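# (Shape of the else branches throughout this class: every result the
# elif chain did not recognise is checked against the group combinations
# that must not exist, so an unexpected bucket fails the test instead of
# being silently ignored.)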
self.assertNotEqual({'project_id': 'project-2', - 'resource_id': 'resource-1'}, - r.groupby) - - def test_group_by_metadata_with_query_filter_multiple(self): - # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple - # needs to be implemented. - # This test should check grouping by multiple metadata fields in - # combination with a query filter. - pass - - def test_group_by_with_period(self): - f = storage.SampleFilter( - meter='instance', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(4, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 
1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_period(self): - # This test checks grouping by metadata fields in combination - # with period grouping. - f = storage.SampleFilter( - meter='instance') - - results = list(self.conn.get_meter_statistics(f, period=7200, - groupby=['resource_metadata.instance_type'])) - self.assertEqual(5, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 
37), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - - def test_group_by_with_query_filter_and_period(self): - f = storage.SampleFilter( - meter='instance', - source='source-1', - ) - results = list(self.conn.get_meter_statistics(f, - period=7200, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11), - datetime.datetime(2013, 8, 1, 16, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - elif (r.groupby == {'project_id': 'project-2'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - 
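# Period alignment, as these period_start/period_end values encode it:
# buckets are anchored at the filter's start_timestamp, or at the earliest
# matching sample when no start is given (10:11 here), not at wall-clock
# hours, and each bucket spans exactly one period. A hypothetical helper
# showing just the alignment rule:
import datetime

def period_bucket(sample_ts, range_start, period_seconds):
    elapsed = int((sample_ts - range_start).total_seconds())
    n = elapsed // period_seconds
    start = range_start + datetime.timedelta(seconds=n * period_seconds)
    return start, start + datetime.timedelta(seconds=period_seconds)

# e.g. a sample at 14:59 with range_start 10:11 and period 7200 lands in
# the (14:11, 16:11) bucket, matching the expected period_start above.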
self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11), - r.period_end) - else: - self.assertNotEqual([{'project_id': 'project-1'}, - datetime.datetime(2013, 8, 1, 16, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'project_id': 'project-2'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_metadata_with_query_filter_and_period(self): - # This test checks grouping with metadata fields in combination - # with a query filter and period grouping. - f = storage.SampleFilter( - meter='instance', - project='project-1', - ) - results = list( - self.conn.get_meter_statistics( - f, period=7200, groupby=['resource_metadata.instance_type'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_metadata.instance_type']), - groupby_keys_set) - self.assertEqual(set(['82', '83', '84']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), - datetime.datetime(2013, 8, 1, 14, 11)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'resource_metadata.instance_type': '82'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(1, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(1, r.avg) - self.assertEqual(1740, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '83'} and - r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11), - r.period_end) - elif (r.groupby == {'resource_metadata.instance_type': '84'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(4260, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10), - r.duration_end) - self.assertEqual(7200, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11), - r.period_end) - else: - 
self.assertNotEqual([{'resource_metadata.instance_type': '82'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '83'}, - datetime.datetime(2013, 8, 1, 14, 11)], - [r.groupby, r.period_start]) - self.assertNotEqual([{'resource_metadata.instance_type': '84'}, - datetime.datetime(2013, 8, 1, 10, 11)], - [r.groupby, r.period_start]) - - def test_group_by_start_timestamp_after(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_end_timestamp_before(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - - self.assertEqual([], results) - - def test_group_by_start_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 58), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - end_timestamp=datetime.datetime(2013, 8, 1, 11, 45), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(1, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1']), groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(3, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(2, r.avg) - - def test_group_by_start_end_timestamp(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3), - end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['project_id'])) - self.assertEqual(2, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), 
groupby_vals_set) - - for r in results: - if r.groupby == {'project_id': 'project-1'}: - self.assertEqual(5, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(1, r.min) - self.assertEqual(4, r.max) - self.assertEqual(10, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'project_id': 'project-2'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(4, r.max) - self.assertEqual(6, r.sum) - self.assertEqual(3, r.avg) - - def test_group_by_start_end_timestamp_with_query_filter(self): - f = storage.SampleFilter( - meter='instance', - project='project-1', - start_timestamp=datetime.datetime(2013, 8, 1, 11, 1), - end_timestamp=datetime.datetime(2013, 8, 1, 20, 0), - ) - results = list(self.conn.get_meter_statistics(f, - groupby=['resource_id'])) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['resource_id']), groupby_keys_set) - self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set) - - for r in results: - if r.groupby == {'resource_id': 'resource-1'}: - self.assertEqual(2, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(2, r.avg) - elif r.groupby == {'resource_id': 'resource-3'}: - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(4, r.min) - self.assertEqual(4, r.max) - self.assertEqual(4, r.sum) - self.assertEqual(4, r.avg) - - def test_group_by_start_end_timestamp_with_period(self): - f = storage.SampleFilter( - meter='instance', - start_timestamp=datetime.datetime(2013, 8, 1, 14, 0), - end_timestamp=datetime.datetime(2013, 8, 1, 17, 0), - ) - results = list(self.conn.get_meter_statistics(f, - period=3600, - groupby=['project_id'])) - self.assertEqual(3, len(results)) - groupby_list = [r.groupby for r in results] - groupby_keys_set = set(x for sub_dict in groupby_list - for x in sub_dict.keys()) - groupby_vals_set = set(x for sub_dict in groupby_list - for x in sub_dict.values()) - self.assertEqual(set(['project_id']), groupby_keys_set) - self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set) - period_start_set = set([r.period_start for r in results]) - period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0), - datetime.datetime(2013, 8, 1, 15, 0), - datetime.datetime(2013, 8, 1, 16, 0)]) - self.assertEqual(period_start_valid, period_start_set) - - for r in results: - if (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 14, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_start) - self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59), - r.duration_end) - self.assertEqual(3600, r.period) - self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0), - r.period_end) - elif (r.groupby == {'project_id': 'project-1'} and - r.period_start == datetime.datetime(2013, 8, 1, 16, 0)): - self.assertEqual(1, r.count) - self.assertEqual('s', r.unit) - self.assertEqual(2, r.min) - self.assertEqual(2, r.max) - self.assertEqual(2, r.sum) - self.assertEqual(2, r.avg) - self.assertEqual(0, r.duration) - 
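# (duration vs. period in these assertions: period is the fixed bucket
# width the caller asked for, while duration is duration_end minus
# duration_start, the span actually covered by samples inside the bucket;
# a bucket holding a single sample therefore reports duration == 0.)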
-                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
-                                 r.duration_start)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
-                                 r.duration_end)
-                self.assertEqual(3600, r.period)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0),
-                                 r.period_end)
-            elif (r.groupby == {'project_id': 'project-2'} and
-                    r.period_start == datetime.datetime(2013, 8, 1, 15, 0)):
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(2, r.min)
-                self.assertEqual(2, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(2, r.avg)
-                self.assertEqual(0, r.duration)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
-                                 r.duration_start)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
-                                 r.duration_end)
-                self.assertEqual(3600, r.period)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0),
-                                 r.period_end)
-            else:
-                self.assertNotEqual([{'project_id': 'project-1'},
-                                     datetime.datetime(2013, 8, 1, 15, 0)],
-                                    [r.groupby, r.period_start])
-                self.assertNotEqual([{'project_id': 'project-2'},
-                                     datetime.datetime(2013, 8, 1, 14, 0)],
-                                    [r.groupby, r.period_start])
-                self.assertNotEqual([{'project_id': 'project-2'},
-                                     datetime.datetime(2013, 8, 1, 16, 0)],
-                                    [r.groupby, r.period_start])
-
-    def test_group_by_start_end_timestamp_with_query_filter_and_period(self):
-        f = storage.SampleFilter(
-            meter='instance',
-            source='source-1',
-            start_timestamp=datetime.datetime(2013, 8, 1, 10, 0),
-            end_timestamp=datetime.datetime(2013, 8, 1, 18, 0),
-        )
-        results = list(self.conn.get_meter_statistics(f,
-                                                      period=7200,
-                                                      groupby=['project_id']))
-        self.assertEqual(3, len(results))
-        groupby_list = [r.groupby for r in results]
-        groupby_keys_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.keys())
-        groupby_vals_set = set(x for sub_dict in groupby_list
-                               for x in sub_dict.values())
-        self.assertEqual(set(['project_id']), groupby_keys_set)
-        self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-        period_start_set = set([r.period_start for r in results])
-        period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0),
-                                  datetime.datetime(2013, 8, 1, 14, 0),
-                                  datetime.datetime(2013, 8, 1, 16, 0)])
-        self.assertEqual(period_start_valid, period_start_set)
-
-        for r in results:
-            if (r.groupby == {'project_id': 'project-1'} and
-                    r.period_start == datetime.datetime(2013, 8, 1, 10, 0)):
-                self.assertEqual(2, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(1, r.min)
-                self.assertEqual(1, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(1, r.avg)
-                self.assertEqual(1740, r.duration)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
-                                 r.duration_start)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
-                                 r.duration_end)
-                self.assertEqual(7200, r.period)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0),
-                                 r.period_end)
-            elif (r.groupby == {'project_id': 'project-1'} and
-                    r.period_start == datetime.datetime(2013, 8, 1, 14, 0)):
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(2, r.min)
-                self.assertEqual(2, r.max)
-                self.assertEqual(2, r.sum)
-                self.assertEqual(2, r.avg)
-                self.assertEqual(0, r.duration)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
-                                 r.duration_start)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
-                                 r.duration_end)
-                self.assertEqual(7200, r.period)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0),
-                                 r.period_end)
-            elif (r.groupby == {'project_id': 'project-2'} and
-                    r.period_start == datetime.datetime(2013, 8, 1, 16, 0)):
-                self.assertEqual(1, r.count)
-                self.assertEqual('s', r.unit)
-                self.assertEqual(4, r.min)
-                self.assertEqual(4, r.max)
-                self.assertEqual(4, r.sum)
-                self.assertEqual(4, r.avg)
-                self.assertEqual(0, r.duration)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
-                                 r.duration_start)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
-                                 r.duration_end)
-                self.assertEqual(7200, r.period)
-                self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0),
-                                 r.period_end)
-            else:
-                self.assertNotEqual([{'project_id': 'project-1'},
-                                     datetime.datetime(2013, 8, 1, 16, 0)],
-                                    [r.groupby, r.period_start])
-                self.assertNotEqual([{'project_id': 'project-2'},
-                                     datetime.datetime(2013, 8, 1, 10, 0)],
-                                    [r.groupby, r.period_start])
-                self.assertNotEqual([{'project_id': 'project-2'},
-                                     datetime.datetime(2013, 8, 1, 14, 0)],
-                                    [r.groupby, r.period_start])
-
-
-class CounterDataTypeTest(DBTestBase):
-    def prepare_data(self):
-        c = sample.Sample(
-            'dummyBigCounter',
-            sample.TYPE_CUMULATIVE,
-            unit='',
-            volume=337203685477580,
-            user_id='user-id',
-            project_id='project-id',
-            resource_id='resource-id',
-            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
-            resource_metadata={},
-            source='test-1',
-        )
-        msg = utils.meter_message_from_counter(
-            c, self.CONF.publisher.telemetry_secret,
-        )
-
-        self.conn.record_metering_data(msg)
-
-        c = sample.Sample(
-            'dummySmallCounter',
-            sample.TYPE_CUMULATIVE,
-            unit='',
-            volume=-337203685477580,
-            user_id='user-id',
-            project_id='project-id',
-            resource_id='resource-id',
-            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
-            resource_metadata={},
-            source='test-1',
-        )
-        msg = utils.meter_message_from_counter(
-            c, self.CONF.publisher.telemetry_secret,
-        )
-        self.conn.record_metering_data(msg)
-
-        c = sample.Sample(
-            'floatCounter',
-            sample.TYPE_CUMULATIVE,
-            unit='',
-            volume=1938495037.53697,
-            user_id='user-id',
-            project_id='project-id',
-            resource_id='resource-id',
-            timestamp=datetime.datetime(2012, 7, 2, 10, 40),
-            resource_metadata={},
-            source='test-1',
-        )
-        msg = utils.meter_message_from_counter(
-            c, self.CONF.publisher.telemetry_secret,
-        )
-        self.conn.record_metering_data(msg)
-
-    def test_storage_can_handle_large_values(self):
-        f = storage.SampleFilter(
-            meter='dummyBigCounter',
-        )
-        results = list(self.conn.get_samples(f))
-        self.assertEqual(337203685477580, results[0].counter_volume)
-        f = storage.SampleFilter(
-            meter='dummySmallCounter',
-        )
-        results = list(self.conn.get_samples(f))
-        observed_num = int(results[0].counter_volume)
-        self.assertEqual(-337203685477580, observed_num)
-
-    def test_storage_can_handle_float_values(self):
-        f = storage.SampleFilter(
-            meter='floatCounter',
-        )
-        results = list(self.conn.get_samples(f))
-        self.assertEqual(1938495037.53697, results[0].counter_volume)
-
-
 class EventTestBase(tests_db.TestBase):
     """Separate test base class.
@@ -3067,134 +458,3 @@ class GetEventTest(EventTestBase):
                    (event_models.Trait.FLOAT_TYPE, 0.0)]
         for trait in events[0].traits:
             options.remove((trait.dtype, trait.value))
-
-
-class BigIntegerTest(tests_db.TestBase):
-    def test_metadata_bigint(self):
-        metadata = {'bigint': 99999999999999}
-        s = sample.Sample(name='name',
-                          type=sample.TYPE_GAUGE,
-                          unit='B',
-                          volume=1,
-                          user_id='user-id',
-                          project_id='project-id',
-                          resource_id='resource-id',
-                          timestamp=datetime.datetime.utcnow(),
-                          resource_metadata=metadata)
-        msg = utils.meter_message_from_counter(
-            s, self.CONF.publisher.telemetry_secret)
-        self.conn.record_metering_data(msg)
-
-
-@tests_db.run_with('mongodb')
-class MongoAutoReconnectTest(DBTestBase):
-    def setUp(self):
-        super(MongoAutoReconnectTest, self).setUp()
-        self.CONF.set_override('retry_interval', 0, group='database')
-
-    def test_mongo_client(self):
-        self.assertIsInstance(self.conn.conn.conn,
-                              pymongo.MongoClient)
-
-    def test_mongo_cursor_next(self):
-        expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39)
-        raise_exc = [False, True]
-        method = self.conn.db.resource.find().cursor.next
-        with mock.patch('pymongo.cursor.Cursor.next',
-                        mock.Mock()) as mock_next:
-            mock_next.side_effect = self.create_side_effect(
-                method, pymongo.errors.AutoReconnect, raise_exc)
-            resource = self.conn.db.resource.find().next()
-            self.assertEqual(expected_first_sample_timestamp,
-                             resource['first_sample_timestamp'])
-
-    def test_mongo_insert(self):
-        raise_exc = [False, True]
-        method = self.conn.db.meter.insert
-
-        with mock.patch('pymongo.collection.Collection.insert',
-                        mock.Mock(return_value=method)) as mock_insert:
-            mock_insert.side_effect = self.create_side_effect(
-                method, pymongo.errors.AutoReconnect, raise_exc)
-            mock_insert.__name__ = 'insert'
-            self.create_and_store_sample(
-                timestamp=datetime.datetime(2014, 10, 15, 14, 39),
-                source='test-proxy')
-            meters = list(self.conn.db.meter.find())
-            self.assertEqual(12, len(meters))
-
-    def test_mongo_find_and_modify(self):
-        raise_exc = [False, True]
-        method = self.conn.db.resource.find_and_modify
-
-        with mock.patch('pymongo.collection.Collection.find_and_modify',
-                        mock.Mock()) as mock_fam:
-            mock_fam.side_effect = self.create_side_effect(
-                method, pymongo.errors.AutoReconnect, raise_exc)
-            mock_fam.__name__ = 'find_and_modify'
-            self.create_and_store_sample(
-                timestamp=datetime.datetime(2014, 10, 15, 14, 39),
-                source='test-proxy')
-            data = self.conn.db.resource.find(
-                {'last_sample_timestamp':
-                 datetime.datetime(2014, 10, 15, 14, 39)})[0]['source']
-            self.assertEqual('test-proxy', data)
-
-    def test_mongo_update(self):
-        raise_exc = [False, True]
-        method = self.conn.db.resource.update
-
-        with mock.patch('pymongo.collection.Collection.update',
-                        mock.Mock()) as mock_update:
-            mock_update.side_effect = self.create_side_effect(
-                method, pymongo.errors.AutoReconnect, raise_exc)
-            mock_update.__name__ = 'update'
-            self.create_and_store_sample(
-                timestamp=datetime.datetime(2014, 10, 15, 17, 39),
-                source='test-proxy-update')
-            data = self.conn.db.resource.find(
-                {'last_sample_timestamp':
-                 datetime.datetime(2014, 10, 15, 17, 39)})[0]['source']
-            self.assertEqual('test-proxy-update', data)
-
-
-@tests_db.run_with('mongodb')
-class MongoTimeToLiveTest(DBTestBase):
-
-    def test_ensure_index(self):
-        cfg.CONF.set_override('metering_time_to_live', 5, group='database')
-        self.conn.upgrade()
-        self.assertEqual(5, self.conn.db.resource.index_information()
-                         ['resource_ttl']['expireAfterSeconds'])
-        self.assertEqual(5, self.conn.db.meter.index_information()
-                         ['meter_ttl']['expireAfterSeconds'])
-
-    def test_modification_of_index(self):
-        cfg.CONF.set_override('metering_time_to_live', 5, group='database')
-        self.conn.upgrade()
-        cfg.CONF.set_override('metering_time_to_live', 15, group='database')
-        self.conn.upgrade()
-        self.assertEqual(15, self.conn.db.resource.index_information()
-                         ['resource_ttl']['expireAfterSeconds'])
-        self.assertEqual(15, self.conn.db.meter.index_information()
-                         ['meter_ttl']['expireAfterSeconds'])
-
-
-class TestRecordUnicodeSamples(DBTestBase):
-    def prepare_data(self):
-        self.msgs = []
-        self.msgs.append(self.create_and_store_sample(
-            name=u'meter.accent\xe9\u0437',
-            metadata={u"metadata_key\xe9\u0437": "test",
-                      u"metadata_key": u"test\xe9\u0437"},
-        ))
-
-    def test_unicode_sample(self):
-        f = storage.SampleFilter()
-        results = list(self.conn.get_samples(f))
-        self.assertEqual(1, len(results))
-        expected = self.msgs[0]
-        actual = results[0].as_dict()
-        self.assertEqual(expected['counter_name'], actual['counter_name'])
-        self.assertEqual(expected['resource_metadata'],
-                         actual['resource_metadata'])
diff --git a/ceilometer/tests/functional/test_bin.py b/ceilometer/tests/functional/test_bin.py
index 3339922b..c51c50df 100644
--- a/ceilometer/tests/functional/test_bin.py
+++ b/ceilometer/tests/functional/test_bin.py
@@ -50,8 +50,6 @@ class BinTestCase(base.BaseTestCase):
                                 stderr=subprocess.PIPE)
         __, err = subp.communicate()
         self.assertEqual(0, subp.poll())
-        self.assertIn(b"Nothing to clean, database metering "
-                      b"time to live is disabled", err)
         self.assertIn(b"Nothing to clean, database event "
                       b"time to live is disabled", err)
@@ -78,88 +76,4 @@ class BinTestCase(base.BaseTestCase):
         self.assertIn(msg, err)

     def test_run_expirer_ttl_enabled(self):
-        self._test_run_expirer_ttl_enabled('metering_time_to_live',
-                                           'metering')
-        self._test_run_expirer_ttl_enabled('time_to_live', 'metering')
         self._test_run_expirer_ttl_enabled('event_time_to_live', 'event')
-
-
-class BinSendSampleTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BinSendSampleTestCase, self).setUp()
-        pipeline_cfg_file = self.path_get('etc/ceilometer/pipeline.yaml')
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "pipeline_cfg_file={0}\n".format(pipeline_cfg_file))
-        if six.PY3:
-            content = content.encode('utf-8')
-
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-
-    def tearDown(self):
-        super(BinSendSampleTestCase, self).tearDown()
-        os.remove(self.tempfile)
-
-    def test_send_counter_run(self):
-        subp = subprocess.Popen(['ceilometer-send-sample',
-                                 "--config-file=%s" % self.tempfile,
-                                 "--sample-resource=someuuid",
-                                 "--sample-name=mycounter"])
-        self.assertEqual(0, subp.wait())
-
-
-class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
-    def setUp(self):
-        super(BinCeilometerPollingServiceTestCase, self).setUp()
-        self.tempfile = None
-        self.subp = None
-
-    def tearDown(self):
-        if self.subp:
-            try:
-                self.subp.kill()
-            except OSError:
-                pass
-        os.remove(self.tempfile)
-        super(BinCeilometerPollingServiceTestCase, self).tearDown()
-
-    def test_starting_with_duplication_namespaces(self):
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "[database]\n"
-                   "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-        self.subp = subprocess.Popen(['ceilometer-polling',
-                                      "--config-file=%s" % self.tempfile,
-                                      "--polling-namespaces",
-                                      "compute",
-                                      "compute"],
-                                     stderr=subprocess.PIPE)
-        out = self.subp.stderr.read(1024)
-        self.assertIn(b'Duplicated values: [\'compute\', \'compute\'] '
-                      b'found in CLI options, auto de-duplicated', out)
-
-    def test_polling_namespaces_invalid_value_in_config(self):
-        content = ("[DEFAULT]\n"
-                   "rpc_backend=fake\n"
-                   "polling_namespaces = ['central']\n"
-                   "[database]\n"
-                   "connection=log://localhost\n")
-        if six.PY3:
-            content = content.encode('utf-8')
-        self.tempfile = fileutils.write_to_tempfile(content=content,
-                                                    prefix='ceilometer',
-                                                    suffix='.conf')
-        self.subp = subprocess.Popen(
-            ["ceilometer-polling", "--config-file=%s" % self.tempfile],
-            stderr=subprocess.PIPE)
-        __, err = self.subp.communicate()
-        expected = ("Exception: Valid values are ['compute', 'central', "
-                    "'ipmi'], but found [\"['central']\"]")
-        self.assertIn(expected, err)
diff --git a/ceilometer/tests/functional/test_collector.py b/ceilometer/tests/functional/test_collector.py
deleted file mode 100644
index 5c09188b..00000000
--- a/ceilometer/tests/functional/test_collector.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import socket
-
-import mock
-import msgpack
-from oslo_config import fixture as fixture_config
-import oslo_messaging
-from oslo_utils import timeutils
-from oslotest import mockpatch
-from stevedore import extension
-
-from ceilometer import collector
-from ceilometer import dispatcher
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests import base as tests_base
-
-
-class FakeException(Exception):
-    pass
-
-
-class FakeConnection(object):
-    def create_worker(self, topic, proxy, pool_name):
-        pass
-
-
-class TestCollector(tests_base.BaseTestCase):
-    def setUp(self):
-        super(TestCollector, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.CONF.import_opt("connection", "oslo_db.options", group="database")
-        self.CONF.set_override("connection", "log://", group='database')
-        self.CONF.set_override('telemetry_secret', 'not-so-secret',
-                               group='publisher')
-        self._setup_messaging()
-
-        self.counter = sample.Sample(
-            name='foobar',
-            type='bad',
-            unit='F',
-            volume=1,
-            user_id='jd',
-            project_id='ceilometer',
-            resource_id='cat',
-            timestamp=timeutils.utcnow().isoformat(),
-            resource_metadata={},
-        ).as_dict()
-
-        self.utf8_msg = utils.meter_message_from_counter(
-            sample.Sample(
-                name=u'test',
-                type=sample.TYPE_CUMULATIVE,
-                unit=u'',
-                volume=1,
-                user_id=u'test',
-                project_id=u'test',
-                resource_id=u'test_run_tasks',
-                timestamp=timeutils.utcnow().isoformat(),
-                resource_metadata={u'name': [([u'TestPublish'])]},
-                source=u'testsource',
-            ),
-            'not-so-secret')
-
-        self.srv = collector.CollectorService()
-
-    def _setup_messaging(self, enabled=True):
-        if enabled:
-            self.setup_messaging(self.CONF)
-        else:
-            self.useFixture(mockpatch.Patch(
-                'ceilometer.messaging.get_transport',
-                return_value=None))
-
-    def _setup_fake_dispatcher(self):
-        plugin = mock.MagicMock()
-        fake_dispatcher = extension.ExtensionManager.make_test_instance([
-            extension.Extension('test', None, None, plugin,),
-        ], propagate_map_exceptions=True)
-        self.useFixture(mockpatch.Patch(
-            'ceilometer.dispatcher.load_dispatcher_manager',
-            return_value=(fake_dispatcher, fake_dispatcher)))
-        return plugin
-
-    def _make_fake_socket(self, sample):
-        def recvfrom(size):
-            # Make the loop stop
-            self.srv.udp_run = False
-            return msgpack.dumps(sample), ('127.0.0.1', 12345)
-
-        sock = mock.Mock()
-        sock.recvfrom = recvfrom
-        return sock
-
-    def _verify_udp_socket(self, udp_socket):
-        conf = self.CONF.collector
-        udp_socket.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
-                                                      socket.SO_REUSEADDR, 1)
-        udp_socket.bind.assert_called_once_with((conf.udp_address,
-                                                 conf.udp_port))
-
-    def test_udp_receive_base(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.counter['source'] = 'mysource'
-        self.counter['counter_name'] = self.counter['name']
-        self.counter['counter_volume'] = self.counter['volume']
-        self.counter['counter_type'] = self.counter['type']
-        self.counter['counter_unit'] = self.counter['unit']
-
-        udp_socket = self._make_fake_socket(self.counter)
-
-        with mock.patch('socket.socket') as mock_socket:
-            mock_socket.return_value = udp_socket
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-            self.srv.udp_thread.join(5)
-            self.assertFalse(self.srv.udp_thread.is_alive())
-            mock_socket.assert_called_with(socket.AF_INET, socket.SOCK_DGRAM)
-
-        self._verify_udp_socket(udp_socket)
-        mock_record = mock_dispatcher.verify_and_record_metering_data
-        mock_record.assert_called_once_with(self.counter)
-
-    def test_udp_socket_ipv6(self):
-        self._setup_messaging(False)
-        self.CONF.set_override('udp_address', '::1', group='collector')
-        self._setup_fake_dispatcher()
-        sock = self._make_fake_socket('data')
-
-        with mock.patch.object(socket, 'socket') as mock_socket:
-            mock_socket.return_value = sock
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-            self.srv.udp_thread.join(5)
-            self.assertFalse(self.srv.udp_thread.is_alive())
-            mock_socket.assert_called_with(socket.AF_INET6, socket.SOCK_DGRAM)
-
-    def test_udp_receive_storage_error(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        mock_record = mock_dispatcher.verify_and_record_metering_data
-        mock_record.side_effect = self._raise_error
-
-        self.counter['source'] = 'mysource'
-        self.counter['counter_name'] = self.counter['name']
-        self.counter['counter_volume'] = self.counter['volume']
-        self.counter['counter_type'] = self.counter['type']
-        self.counter['counter_unit'] = self.counter['unit']
-
-        udp_socket = self._make_fake_socket(self.counter)
-        with mock.patch('socket.socket', return_value=udp_socket):
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-            self.srv.udp_thread.join(5)
-            self.assertFalse(self.srv.udp_thread.is_alive())
-
-        self._verify_udp_socket(udp_socket)
-
-        mock_record.assert_called_once_with(self.counter)
-
-    @staticmethod
-    def _raise_error(*args, **kwargs):
-        raise Exception
-
-    def test_udp_receive_bad_decoding(self):
-        self._setup_messaging(False)
-        self._setup_fake_dispatcher()
-        udp_socket = self._make_fake_socket(self.counter)
-        with mock.patch('socket.socket', return_value=udp_socket):
-            with mock.patch('msgpack.loads', self._raise_error):
-                self.srv.start()
-                self.addCleanup(self.srv.stop)
-                self.srv.udp_thread.join(5)
-                self.assertFalse(self.srv.udp_thread.is_alive())
-
-        self._verify_udp_socket(udp_socket)
-
-    @mock.patch.object(collector.CollectorService, 'start_udp')
-    def test_only_udp(self, udp_start):
-        """Check that only UDP is started if messaging transport is unset."""
-        self._setup_messaging(False)
-        self._setup_fake_dispatcher()
-        udp_socket = self._make_fake_socket(self.counter)
-        real_start = oslo_messaging.MessageHandlingServer.start
-        with mock.patch.object(oslo_messaging.MessageHandlingServer,
-                               'start', side_effect=real_start) as rpc_start:
-            with mock.patch('socket.socket', return_value=udp_socket):
-                self.srv.start()
-                self.addCleanup(self.srv.stop)
-                self.srv.udp_thread.join(5)
-                self.assertFalse(self.srv.udp_thread.is_alive())
-                self.assertEqual(0, rpc_start.call_count)
-                self.assertEqual(1, udp_start.call_count)
-
-    def test_udp_receive_valid_encoding(self):
-        self._setup_messaging(False)
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.data_sent = []
-        with mock.patch('socket.socket',
-                        return_value=self._make_fake_socket(self.utf8_msg)):
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-            self.srv.udp_thread.join(5)
-            self.assertFalse(self.srv.udp_thread.is_alive())
-        self.assertTrue(utils.verify_signature(
-            mock_dispatcher.method_calls[0][1][0],
-            "not-so-secret"))
-
-    def _test_collector_requeue(self, listener, batch_listener=False):
-
-        mock_dispatcher = self._setup_fake_dispatcher()
-        self.srv.dispatcher_manager = dispatcher.load_dispatcher_manager()
-        mock_record = mock_dispatcher.verify_and_record_metering_data
-        mock_record.side_effect = Exception('boom')
-        mock_dispatcher.verify_and_record_events.side_effect = Exception(
-            'boom')
-
-        self.srv.start()
-        self.addCleanup(self.srv.stop)
-        endp = getattr(self.srv, listener).dispatcher.endpoints[0]
-        ret = endp.sample([{'ctxt': {}, 'publisher_id': 'pub_id',
-                            'event_type': 'event', 'payload': {},
-                            'metadata': {}}])
-        self.assertEqual(oslo_messaging.NotificationResult.REQUEUE,
-                         ret)
-
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_sample_requeue(self):
-        self._test_collector_requeue('sample_listener')
-
-    @mock.patch.object(collector.CollectorService, 'start_udp', mock.Mock())
-    def test_collector_event_requeue(self):
-        self.CONF.set_override('store_events', True, group='notification')
-        self._test_collector_requeue('event_listener')
diff --git a/ceilometer/tests/functional/test_notification.py b/ceilometer/tests/functional/test_notification.py
deleted file mode 100644
index 7e4ee6a4..00000000
--- a/ceilometer/tests/functional/test_notification.py
+++ /dev/null
@@ -1,614 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for Ceilometer notify daemon.""" - -import shutil - -import mock -from oslo_config import fixture as fixture_config -import oslo_messaging -import oslo_service.service -from oslo_utils import fileutils -from oslo_utils import timeutils -import six -from stevedore import extension -import yaml - -from ceilometer.compute.notifications import instance -from ceilometer import messaging -from ceilometer import notification -from ceilometer.publisher import test as test_publisher -from ceilometer.tests import base as tests_base - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -class TestNotification(tests_base.BaseTestCase): - - def setUp(self): - super(TestNotification, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("backend_url", None, group="coordination") - self.CONF.set_override("store_events", False, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - self.setup_messaging(self.CONF) - self.srv = notification.NotificationService() - - def fake_get_notifications_manager(self, pm): - self.plugin = instance.Instance(pm) - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('test', - None, - None, - self.plugin) - ] - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - @mock.patch('ceilometer.event.endpoint.EventsNotificationEndpoint') - def _do_process_notification_manager_start(self, - fake_event_endpoint_class): - with mock.patch.object(self.srv, - '_get_notifications_manager') as get_nm: - get_nm.side_effect = self.fake_get_notifications_manager - self.srv.start() - self.addCleanup(self.srv.stop) - self.fake_event_endpoint = fake_event_endpoint_class.return_value - - def test_start_multiple_listeners(self): - urls = ["fake://vhost1", "fake://vhost2"] - self.CONF.set_override("messaging_urls", urls, group="notification") 
-        self._do_process_notification_manager_start()
-        self.assertEqual(2, len(self.srv.listeners))
-
-    def test_process_notification(self):
-        self._do_process_notification_manager_start()
-        self.srv.pipeline_manager.pipelines[0] = mock.MagicMock()
-
-        self.plugin.info([{'ctxt': TEST_NOTICE_CTXT,
-                           'publisher_id': 'compute.vagrant-precise',
-                           'event_type': 'compute.instance.create.end',
-                           'payload': TEST_NOTICE_PAYLOAD,
-                           'metadata': TEST_NOTICE_METADATA}])
-
-        self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints))
-        self.assertTrue(self.srv.pipeline_manager.publisher.called)
-
-    def test_process_notification_no_events(self):
-        self._do_process_notification_manager_start()
-        self.assertEqual(1, len(self.srv.listeners[0].dispatcher.endpoints))
-        self.assertNotEqual(self.fake_event_endpoint,
-                            self.srv.listeners[0].dispatcher.endpoints[0])
-
-    @mock.patch('ceilometer.pipeline.setup_event_pipeline', mock.MagicMock())
-    def test_process_notification_with_events(self):
-        self.CONF.set_override("store_events", True, group="notification")
-        self._do_process_notification_manager_start()
-        self.assertEqual(2, len(self.srv.listeners[0].dispatcher.endpoints))
-        self.assertEqual(self.fake_event_endpoint,
-                         self.srv.listeners[0].dispatcher.endpoints[0])
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    @mock.patch('oslo_messaging.get_batch_notification_listener')
-    def test_unique_consumers(self, mock_listener):
-
-        def fake_get_notifications_manager_dup_targets(pm):
-            plugin = instance.Instance(pm)
-            return extension.ExtensionManager.make_test_instance(
-                [extension.Extension('test', None, None, plugin),
-                 extension.Extension('test', None, None, plugin)])
-
-        with mock.patch.object(self.srv,
-                               '_get_notifications_manager') as get_nm:
-            get_nm.side_effect = fake_get_notifications_manager_dup_targets
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-            self.assertEqual(1, len(mock_listener.call_args_list))
-            args, kwargs = mock_listener.call_args
-            self.assertEqual(1, len(args[1]))
-            self.assertIsInstance(args[1][0], oslo_messaging.Target)
-
-
-class BaseRealNotification(tests_base.BaseTestCase):
-    def setup_pipeline(self, counter_names):
-        pipeline = yaml.dump({
-            'sources': [{
-                'name': 'test_pipeline',
-                'interval': 5,
-                'meters': counter_names,
-                'sinks': ['test_sink']
-            }],
-            'sinks': [{
-                'name': 'test_sink',
-                'transformers': [],
-                'publishers': ['test://']
-            }]
-        })
-        if six.PY3:
-            pipeline = pipeline.encode('utf-8')
-
-        pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline,
                                                        prefix="pipeline",
                                                        suffix="yaml")
-        return pipeline_cfg_file
-
-    def setup_event_pipeline(self, event_names):
-        ev_pipeline = yaml.dump({
-            'sources': [{
-                'name': 'test_event',
-                'events': event_names,
-                'sinks': ['test_sink']
-            }],
-            'sinks': [{
-                'name': 'test_sink',
-                'publishers': ['test://']
-            }]
-        })
-        if six.PY3:
-            ev_pipeline = ev_pipeline.encode('utf-8')
-
-        ev_pipeline_cfg_file = fileutils.write_to_tempfile(
-            content=ev_pipeline, prefix="event_pipeline", suffix="yaml")
-        return ev_pipeline_cfg_file
-
-    def setUp(self):
-        super(BaseRealNotification, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        # Dummy config file to avoid looking for system config
-        self.CONF([], project='ceilometer', validate_default_values=True)
-        self.setup_messaging(self.CONF, 'nova')
-
-        pipeline_cfg_file = self.setup_pipeline(['instance', 'memory'])
-        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
-
-        self.expected_samples = 2
-
self.CONF.set_override("backend_url", None, group="coordination") - self.CONF.set_override("store_events", True, group="notification") - self.CONF.set_override("disable_non_metric_meters", False, - group="notification") - - ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.*']) - self.expected_events = 1 - - self.CONF.set_override("event_pipeline_cfg_file", - ev_pipeline_cfg_file) - self.CONF.set_override( - "definitions_cfg_file", - self.path_get('etc/ceilometer/event_definitions.yaml'), - group='event') - self.publisher = test_publisher.TestPublisher("") - - def _check_notification_service(self): - self.srv.start() - self.addCleanup(self.srv.stop) - - notifier = messaging.get_notifier(self.transport, - "compute.vagrant-precise") - notifier.info({}, 'compute.instance.create.end', - TEST_NOTICE_PAYLOAD) - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < 600: - if (len(self.publisher.samples) >= self.expected_samples and - len(self.publisher.events) >= self.expected_events): - break - - resources = list(set(s.resource_id for s in self.publisher.samples)) - self.assertEqual(self.expected_samples, len(self.publisher.samples)) - self.assertEqual(self.expected_events, len(self.publisher.events)) - self.assertEqual(["9f9d01b9-4a58-4271-9e27-398b21ab20d1"], resources) - - -class TestRealNotificationReloadablePipeline(BaseRealNotification): - - def setUp(self): - super(TestRealNotificationReloadablePipeline, self).setUp() - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('refresh_event_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 1) - self.srv = notification.NotificationService() - - @mock.patch('ceilometer.publisher.test.TestPublisher') - def test_notification_pipeline_poller(self, fake_publisher_cls): - fake_publisher_cls.return_value = self.publisher - self.srv.tg = mock.MagicMock() - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline_poller_call = mock.call(1, self.srv.refresh_pipeline) - self.assertIn(pipeline_poller_call, - self.srv.tg.add_timer.call_args_list) - - def test_notification_reloaded_pipeline(self): - pipeline_cfg_file = self.setup_pipeline(['instance']) - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline = self.srv.pipe_manager - - # Modify the collection targets - updated_pipeline_cfg_file = self.setup_pipeline(['vcpus', - 'disk.root.size']) - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - self.srv.refresh_pipeline() - - self.assertNotEqual(pipeline, self.srv.pipe_manager) - - def test_notification_reloaded_event_pipeline(self): - ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.create.start']) - self.CONF.set_override("event_pipeline_cfg_file", ev_pipeline_cfg_file) - - self.CONF.set_override("store_events", True, group="notification") - - self.srv.start() - self.addCleanup(self.srv.stop) - - pipeline = self.srv.event_pipe_manager - - # Modify the collection targets - updated_ev_pipeline_cfg_file = self.setup_event_pipeline( - ['compute.instance.*']) - - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_ev_pipeline_cfg_file, ev_pipeline_cfg_file) - self.srv.refresh_pipeline() - - self.assertNotEqual(pipeline, self.srv.pipe_manager) - - -class 
-class TestRealNotification(BaseRealNotification):
-
-    def setUp(self):
-        super(TestRealNotification, self).setUp()
-        self.srv = notification.NotificationService()
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_notification_service(self, fake_publisher_cls):
-        fake_publisher_cls.return_value = self.publisher
-        self._check_notification_service()
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_notification_service_error_topic(self, fake_publisher_cls):
-        fake_publisher_cls.return_value = self.publisher
-        self.srv.start()
-        self.addCleanup(self.srv.stop)
-        notifier = messaging.get_notifier(self.transport,
-                                          'compute.vagrant-precise')
-        notifier.error({}, 'compute.instance.error',
-                       TEST_NOTICE_PAYLOAD)
-        start = timeutils.utcnow()
-        while timeutils.delta_seconds(start, timeutils.utcnow()) < 600:
-            if len(self.publisher.events) >= self.expected_events:
-                break
-        self.assertEqual(self.expected_events, len(self.publisher.events))
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_notification_disable_non_metrics(self, fake_publisher_cls):
-        self.CONF.set_override("disable_non_metric_meters", True,
-                               group="notification")
-        # instance is a not a metric. we should only get back memory
-        self.expected_samples = 1
-        fake_publisher_cls.return_value = self.publisher
-        self._check_notification_service()
-        self.assertEqual('memory', self.publisher.samples[0].name)
-
-    @mock.patch.object(oslo_service.service.Service, 'stop')
-    def test_notification_service_start_abnormal(self, mocked):
-        try:
-            self.srv.stop()
-        except Exception:
-            pass
-        self.assertEqual(1, mocked.call_count)
-
-
-class TestRealNotificationHA(BaseRealNotification):
-
-    def setUp(self):
-        super(TestRealNotificationHA, self).setUp()
-        self.CONF.set_override('workload_partitioning', True,
-                               group='notification')
-        self.srv = notification.NotificationService()
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_notification_service(self, fake_publisher_cls):
-        fake_publisher_cls.return_value = self.publisher
-        self._check_notification_service()
-
-    @mock.patch('oslo_messaging.get_batch_notification_listener')
-    def test_reset_listener_on_refresh(self, mock_listener):
-        mock_listener.side_effect = [
-            mock.MagicMock(),  # main listener
-            mock.MagicMock(),  # pipeline listener
-            mock.MagicMock(),  # refresh pipeline listener
-        ]
-
-        self.srv.start()
-        self.addCleanup(self.srv.stop)
-
-        def _check_listener_targets():
-            args, kwargs = mock_listener.call_args
-            self.assertEqual(20, len(args[1]))
-            self.assertIsInstance(args[1][0], oslo_messaging.Target)
-
-        _check_listener_targets()
-
-        listener = self.srv.pipeline_listener
-        self.srv._configure_pipeline_listener()
-        self.assertIsNot(listener, self.srv.pipeline_listener)
-
-        _check_listener_targets()
-
-    @mock.patch('oslo_messaging.get_batch_notification_listener')
-    def test_retain_common_targets_on_refresh(self, mock_listener):
-        with mock.patch('ceilometer.coordination.PartitionCoordinator'
-                        '.extract_my_subset', return_value=[1, 2]):
-            self.srv.start()
-            self.addCleanup(self.srv.stop)
-        listened_before = [target.topic for target in
-                           mock_listener.call_args[0][1]]
-        self.assertEqual(4, len(listened_before))
-        with mock.patch('ceilometer.coordination.PartitionCoordinator'
-                        '.extract_my_subset', return_value=[1, 3]):
-            self.srv._refresh_agent(None)
-        listened_after = [target.topic for target in
-                          mock_listener.call_args[0][1]]
-        self.assertEqual(4, len(listened_after))
-        common = set(listened_before) & set(listened_after)
-        for topic in common:
-            self.assertTrue(topic.endswith('1'))
-
-    @mock.patch('oslo_messaging.get_batch_notification_listener')
-    def test_notify_to_relevant_endpoint(self, mock_listener):
-        self.srv.start()
-        self.addCleanup(self.srv.stop)
-
-        targets = mock_listener.call_args[0][1]
-        self.assertIsNotEmpty(targets)
-
-        endpoints = {}
-        for endpoint in mock_listener.call_args[0][2]:
-            self.assertEqual(1, len(endpoint.publish_context.pipelines))
-            pipe = list(endpoint.publish_context.pipelines)[0]
-            endpoints[pipe.name] = endpoint
-
-        notifiers = []
-        notifiers.extend(self.srv.pipe_manager.transporters[0][2])
-        notifiers.extend(self.srv.event_pipe_manager.transporters[0][2])
-        for notifier in notifiers:
-            filter_rule = endpoints[notifier.publisher_id].filter_rule
-            self.assertEqual(True, filter_rule.match(None,
-                                                     notifier.publisher_id,
-                                                     None, None, None))
-
-    @mock.patch('oslo_messaging.Notifier.sample')
-    def test_broadcast_to_relevant_pipes_only(self, mock_notifier):
-        self.srv.start()
-        self.addCleanup(self.srv.stop)
-        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
-            if (hasattr(endpoint, 'filter_rule') and
-                not endpoint.filter_rule.match(None, None, 'nonmatching.end',
-                                               None, None)):
-                continue
-            endpoint.info([{
-                'ctxt': TEST_NOTICE_CTXT,
-                'publisher_id': 'compute.vagrant-precise',
-                'event_type': 'nonmatching.end',
-                'payload': TEST_NOTICE_PAYLOAD,
-                'metadata': TEST_NOTICE_METADATA}])
-        self.assertFalse(mock_notifier.called)
-        for endpoint in self.srv.listeners[0].dispatcher.endpoints:
-            if (hasattr(endpoint, 'filter_rule') and
-                not endpoint.filter_rule.match(None, None,
-                                               'compute.instance.create.end',
-                                               None, None)):
-                continue
-            endpoint.info([{
-                'ctxt': TEST_NOTICE_CTXT,
-                'publisher_id': 'compute.vagrant-precise',
-                'event_type': 'compute.instance.create.end',
-                'payload': TEST_NOTICE_PAYLOAD,
-                'metadata': TEST_NOTICE_METADATA}])
-
-        self.assertTrue(mock_notifier.called)
-        self.assertEqual(3, mock_notifier.call_count)
-        self.assertEqual('pipeline.event',
-                         mock_notifier.call_args_list[0][1]['event_type'])
-        self.assertEqual('ceilometer.pipeline',
-                         mock_notifier.call_args_list[1][1]['event_type'])
-        self.assertEqual('ceilometer.pipeline',
-                         mock_notifier.call_args_list[2][1]['event_type'])
-
-
-class TestRealNotificationMultipleAgents(tests_base.BaseTestCase):
-    def setup_pipeline(self, transformers):
-        pipeline = yaml.dump({
-            'sources': [{
-                'name': 'test_pipeline',
-                'interval': 5,
-                'meters': ['instance', 'memory'],
-                'sinks': ['test_sink']
-            }],
-            'sinks': [{
-                'name': 'test_sink',
-                'transformers': transformers,
-                'publishers': ['test://']
-            }]
-        })
-        if six.PY3:
-            pipeline = pipeline.encode('utf-8')
-
-        pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline,
                                                        prefix="pipeline",
                                                        suffix="yaml")
-        return pipeline_cfg_file
-
-    def setUp(self):
-        super(TestRealNotificationMultipleAgents, self).setUp()
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.CONF([], project='ceilometer', validate_default_values=True)
-        self.setup_messaging(self.CONF, 'nova')
-
-        pipeline_cfg_file = self.setup_pipeline(['instance', 'memory'])
-        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
-        self.CONF.set_override("backend_url", None, group="coordination")
-        self.CONF.set_override("store_events", False, group="notification")
-        self.CONF.set_override("disable_non_metric_meters", False,
-                               group="notification")
-        self.CONF.set_override('workload_partitioning', True,
-                               group='notification')
-        self.CONF.set_override('pipeline_processing_queues', 2,
-                               group='notification')
-        self.publisher = test_publisher.TestPublisher("")
-        self.publisher2 = test_publisher.TestPublisher("")
-
-    def _check_notifications(self, fake_publisher_cls):
-        fake_publisher_cls.side_effect = [self.publisher, self.publisher2]
-
-        self.srv = notification.NotificationService()
-        self.srv2 = notification.NotificationService()
-        with mock.patch('ceilometer.coordination.PartitionCoordinator'
-                        '._get_members', return_value=['harry', 'lloyd']):
-            with mock.patch('uuid.uuid4', return_value='harry'):
-                self.srv.start()
-                self.addCleanup(self.srv.stop)
-            with mock.patch('uuid.uuid4', return_value='lloyd'):
-                self.srv2.start()
-                self.addCleanup(self.srv2.stop)
-
-        notifier = messaging.get_notifier(self.transport,
-                                          "compute.vagrant-precise")
-        payload1 = TEST_NOTICE_PAYLOAD.copy()
-        payload1['instance_id'] = '0'
-        notifier.info({}, 'compute.instance.create.end', payload1)
-        payload2 = TEST_NOTICE_PAYLOAD.copy()
-        payload2['instance_id'] = '1'
-        notifier.info({}, 'compute.instance.create.end', payload2)
-        self.expected_samples = 4
-        start = timeutils.utcnow()
-        with mock.patch('six.moves.builtins.hash', lambda x: int(x)):
-            while timeutils.delta_seconds(start, timeutils.utcnow()) < 60:
-                if (len(self.publisher.samples + self.publisher2.samples) >=
-                        self.expected_samples):
-                    break
-
-        self.assertEqual(2, len(self.publisher.samples))
-        self.assertEqual(2, len(self.publisher2.samples))
-        self.assertEqual(1, len(set(
-            s.resource_id for s in self.publisher.samples)))
-        self.assertEqual(1, len(set(
-            s.resource_id for s in self.publisher2.samples)))
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_multiple_agents_no_transform(self, fake_publisher_cls):
-        pipeline_cfg_file = self.setup_pipeline([])
-        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
-        self._check_notifications(fake_publisher_cls)
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_multiple_agents_transform(self, fake_publisher_cls):
-        pipeline_cfg_file = self.setup_pipeline(
-            [{
-                'name': 'unit_conversion',
-                'parameters': {
-                    'source': {},
-                    'target': {'name': 'cpu_mins',
-                               'unit': 'min',
-                               'scale': 'volume'},
-                }
-            }])
-        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
-        self._check_notifications(fake_publisher_cls)
-
-    @mock.patch('ceilometer.publisher.test.TestPublisher')
-    def test_multiple_agents_multiple_transform(self, fake_publisher_cls):
-        pipeline_cfg_file = self.setup_pipeline(
-            [{
-                'name': 'unit_conversion',
-                'parameters': {
-                    'source': {},
-                    'target': {'name': 'cpu_mins',
-                               'unit': 'min',
-                               'scale': 'volume'},
-                }
-            }, {
-                'name': 'unit_conversion',
-                'parameters': {
-                    'source': {},
-                    'target': {'name': 'cpu_mins',
-                               'unit': 'min',
-                               'scale': 'volume'},
-                }
-            }])
-        self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file)
-        self._check_notifications(fake_publisher_cls)
diff --git a/ceilometer/tests/integration/__init__.py b/ceilometer/tests/integration/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/tests/integration/gabbi/__init__.py b/ceilometer/tests/integration/gabbi/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml b/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml
deleted file mode 100644
index 437d35dd..00000000
--- a/ceilometer/tests/integration/gabbi/gabbits-live/autoscaling.yaml
+++ /dev/null
@@ -1,175 +0,0 @@
-defaults:
-    request_headers:
-        x-auth-token: $ENVIRON['ADMIN_TOKEN']
-
-tests:
-    - name: list alarms none
-      desc: Lists alarms, none yet exist
-      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
-      method: GET
-      response_strings:
-          - "[]"
-
-    - name: list servers none
-      desc: List servers, none yet exists
-      url: $ENVIRON['NOVA_SERVICE_URL']/servers
-      method: GET
-      response_strings:
-          - "[]"
-
-    - name: create stack
-      desc: Create an autoscaling stack
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks
-      method: POST
-      request_headers:
-          content-type: application/json
-      data: <@create_stack.json
-      status: 201
-
-    - name: waiting for stack creation
-      desc: Wait for the second event on the stack resource, it can be a success or failure
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test
-      redirects: true
-      method: GET
-      status: 200
-      poll:
-          count: 300
-          delay: 1
-      response_json_paths:
-          $.events[1].resource_name: integration_test
-
-    - name: control stack status
-      desc: Checks the stack have been created successfully
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
-      redirects: true
-      method: GET
-      status: 200
-      poll:
-          count: 5
-          delay: 1
-      response_json_paths:
-          $.stack.stack_status: "CREATE_COMPLETE"
-
-    - name: list servers
-      desc: Wait the autoscaling stack grow to two servers
-      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
-      method: GET
-      poll:
-          count: 600
-          delay: 1
-      response_json_paths:
-          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
-          $.servers[1].metadata.'metering.server_group': $RESPONSE['$.stack.id']
-          $.servers[0].status: ACTIVE
-          $.servers[1].status: ACTIVE
-          $.servers.`len`: 2
-
-    - name: check gnocchi resources
-      desc: Check the gnocchi resources for this two servers exists
-      url: $ENVIRON['GNOCCHI_SERVICE_URL']/v1/resource/instance
-      method: GET
-      poll:
-          count: 30
-          delay: 1
-      response_strings:
-          - '"id": "$RESPONSE["$.servers[0].id"]"'
-          - '"id": "$RESPONSE["$.servers[1].id"]"'
-
-    - name: check alarm
-      desc: Check the aodh alarm and its state
-      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
-      method: GET
-      poll:
-          count: 30
-          delay: 1
-      response_strings:
-          - "integration_test-cpu_alarm_high-"
-      response_json_paths:
-          $[0].state: alarm
-
-    - name: get stack location for update
-      desc: Get the stack location
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
-      method: GET
-      status: 302
-
-    - name: update stack
-      desc: Update an autoscaling stack
-      url: $LOCATION
-      method: PUT
-      request_headers:
-          content-type: application/json
-      data: <@update_stack.json
-      status: 202
-
-    - name: waiting for stack update
-      desc: Wait for the third event on the stack resource, it can be a success or failure
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test/events?resource_name=integration_test
-      redirects: true
-      method: GET
-      status: 200
-      poll:
-          count: 300
-          delay: 1
-      response_json_paths:
-          $.events[3].resource_name: integration_test
-
-    - name: control stack status
-      desc: Checks the stack have been created successfully
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
-      redirects: true
-      method: GET
-      status: 200
-      poll:
-          count: 5
-          delay: 1
-      response_json_paths:
-          $.stack.stack_status: "UPDATE_COMPLETE"
-
-    - name: list servers
-      desc: Wait the autoscaling stack shrink to one server
-      url: $ENVIRON['NOVA_SERVICE_URL']/servers/detail
-      method: GET
-      poll:
-          count: 600
-          delay: 1
-      response_json_paths:
-          $.servers[0].metadata.'metering.server_group': $RESPONSE['$.stack.id']
-          $.servers[0].status: ACTIVE
-          $.servers.`len`: 1
-
-    - name: get stack location
-      desc: Get the stack location
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
-      method: GET
-      status: 302
-
-    - name: delete stack
-      desc: Delete the stack
-      url: $LOCATION
-      method: DELETE
-      status: 204
-
-    - name: get deleted stack
-      desc: Check the stack have been deleted
-      url: $ENVIRON['HEAT_SERVICE_URL']/stacks/integration_test
-      redirects: true
-      method: GET
-      poll:
-          count: 240
-          delay: 1
-      status: 404
-
-    - name: list alarms deleted
-      desc: List alarms, no more exist
-      url: $ENVIRON['AODH_SERVICE_URL']/v2/alarms
-      method: GET
-      response_strings:
-          - "[]"
-
-    - name: list servers deleted
-      desc: List servers, no more exists
-      url: $ENVIRON['NOVA_SERVICE_URL']/servers
-      method: GET
-      response_strings:
-          - "[]"
diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json b/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json
deleted file mode 100644
index 7b3d3b4d..00000000
--- a/ceilometer/tests/integration/gabbi/gabbits-live/create_stack.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
-    "stack_name": "integration_test",
-    "template": {
-        "heat_template_version": "2013-05-23",
-        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
-        "resources": {
-            "asg": {
-                "type": "OS::Heat::AutoScalingGroup",
-                "properties": {
-                    "min_size": 1,
-                    "max_size": 2,
-                    "resource": {
-                        "type": "OS::Nova::Server",
-                        "properties": {
-                            "networks": [{ "network": "private" }],
-                            "flavor": "m1.tiny",
-                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
-                            "metadata": {
-                                "metering.server_group": { "get_param": "OS::stack_id" }
-                            },
-                            "user_data_format": "RAW",
-                            "user_data": {"Fn::Join": ["", [
-                                "#!/bin/sh\n",
-                                "echo 'Loading CPU'\n",
-                                "set -v\n",
-                                "cat /dev/urandom > /dev/null\n"
-                            ]]}
-                        }
-                    }
-                }
-            },
-            "web_server_scaleup_policy": {
-                "type": "OS::Heat::ScalingPolicy",
-                "properties": {
-                    "adjustment_type": "change_in_capacity",
-                    "auto_scaling_group_id": { "get_resource": "asg" },
-                    "cooldown": 2,
-                    "scaling_adjustment": 1
-                }
-            },
-            "cpu_alarm_high": {
-                "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm",
-                "properties": {
-                    "description": "Scale-up if the mean CPU > 10% on 1 minute",
-                    "metric": "cpu_util",
-                    "aggregation_method": "mean",
-                    "granularity": 60,
-                    "evaluation_periods": 1,
-                    "threshold": 10,
-                    "comparison_operator": "gt",
-                    "alarm_actions": [
-                        { "get_attr": [ "web_server_scaleup_policy", "alarm_url" ] }
-                    ],
-                    "resource_type": "instance",
-                    "query": {
-                        "str_replace": {
-                            "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}",
-                            "params": {
-                                "stack_id": { "get_param": "OS::stack_id" }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json b/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json
deleted file mode 100644
index 8897d399..00000000
--- a/ceilometer/tests/integration/gabbi/gabbits-live/update_stack.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
-    "template": {
-        "heat_template_version": "2013-05-23",
-        "description": "Integration Test AutoScaling with heat+ceilometer+gnocchi+aodh",
-        "resources": {
-            "asg": {
-                "type": "OS::Heat::AutoScalingGroup",
-                "properties": {
-                    "min_size": 1,
-                    "max_size": 2,
-                    "resource": {
-                        "type": "OS::Nova::Server",
-                        "properties": {
-                            "networks": [{ "network": "private" }],
-                            "flavor": "m1.tiny",
-                            "image": "$ENVIRON['GLANCE_IMAGE_NAME']",
-                            "metadata": {
-                                "metering.server_group": { "get_param": "OS::stack_id" }
"OS::stack_id" } - }, - "user_data_format": "RAW", - "user_data": {"Fn::Join": ["", [ - "#!/bin/sh\n", - "echo 'Loading CPU'\n", - "set -v\n", - "cat /dev/urandom > /dev/null\n" - ]]} - } - } - } - }, - "web_server_scaledown_policy": { - "type": "OS::Heat::ScalingPolicy", - "properties": { - "adjustment_type": "change_in_capacity", - "auto_scaling_group_id": { "get_resource": "asg" }, - "cooldown": 2, - "scaling_adjustment": -1 - } - }, - "cpu_alarm_high": { - "type": "OS::Ceilometer::GnocchiAggregationByResourcesAlarm", - "properties": { - "description": "Scale-down if the mean CPU > 10% on 1 minute", - "metric": "cpu_util", - "aggregation_method": "mean", - "granularity": 60, - "evaluation_periods": 1, - "threshold": 10, - "comparison_operator": "gt", - "alarm_actions": [ - { "get_attr": [ "web_server_scaledown_policy", "alarm_url" ] } - ], - "resource_type": "instance", - "query": { - "str_replace": { - "template": "{\"and\": [{\"=\": {\"server_group\": \"stack_id\"}}, {\"=\": {\"ended_at\": null}}]}", - "params": { - "stack_id": { "get_param": "OS::stack_id" } - } - } - } - } - } - } - } -} diff --git a/ceilometer/tests/integration/gabbi/test_gabbi_live.py b/ceilometer/tests/integration/gabbi/test_gabbi_live.py deleted file mode 100644 index b347b556..00000000 --- a/ceilometer/tests/integration/gabbi/test_gabbi_live.py +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2015 Red Hat. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A test module to exercise the Gnocchi API with gabbi.""" - -import os - -from gabbi import driver - - -TESTS_DIR = 'gabbits-live' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - NEEDED_ENV = ["AODH_SERVICE_URL", "GNOCCHI_SERVICE_URL", - "HEAT_SERVICE_URL", "NOVA_SERVICE_URL", - "GLANCE_IMAGE_NAME", "ADMIN_TOKEN"] - - for env_variable in NEEDED_ENV: - if not os.getenv(env_variable): - if os.getenv("GABBI_LIVE_FAIL_IF_NO_TEST"): - raise RuntimeError('%s is not set' % env_variable) - else: - return - - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - return driver.build_tests(test_dir, loader, host="localhost", port=8041) diff --git a/ceilometer/tests/integration/hooks/post_test_hook.sh b/ceilometer/tests/integration/hooks/post_test_hook.sh deleted file mode 100755 index fbc69a7e..00000000 --- a/ceilometer/tests/integration/hooks/post_test_hook.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/bin/bash -xe - -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-
-# This script is executed inside post_test_hook function in devstack gate.
-
-function generate_testr_results {
-    if [ -f .testrepository/0 ]; then
-        sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
-        sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
-        sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
-        sudo gzip -9 $BASE/logs/testrepository.subunit
-        sudo gzip -9 $BASE/logs/testr_results.html
-        sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
-        sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
-    fi
-}
-
-# If we're running in the gate find our keystone endpoint to give to
-# gabbi tests and do a chown. Otherwise the existing environment
-# should provide URL and TOKEN.
-if [ -d $BASE/new/devstack ]; then
-    export CEILOMETER_DIR="$BASE/new/ceilometer"
-    STACK_USER=stack
-    sudo chown -R $STACK_USER:stack $CEILOMETER_DIR
-    source $BASE/new/devstack/openrc admin admin
-    # Go to the ceilometer dir
-    cd $CEILOMETER_DIR
-fi
-
-openstack catalog list
-export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk '/public/{print $2}')
-export GNOCCHI_SERVICE_URL=$(openstack catalog show metric -c endpoints -f value | awk '/public/{print $2}')
-export HEAT_SERVICE_URL=$(openstack catalog show orchestration -c endpoints -f value | awk '/public/{print $2}')
-export NOVA_SERVICE_URL=$(openstack catalog show compute -c endpoints -f value | awk '/public/{print $2}')
-export GLANCE_IMAGE_NAME=$(openstack image list | awk '/ cirros.*uec /{print $4}')
-export ADMIN_TOKEN=$(openstack token issue -c id -f value)
-
-# Run tests
-echo "Running telemetry integration test suite"
-set +e
-
-sudo -E -H -u ${STACK_USER:-${USER}} tox -eintegration
-EXIT_CODE=$?
-
-echo "* Message queue status:"
-sudo rabbitmqctl list_queues | grep -e \\.sample -e \\.info
-
-if [ $EXIT_CODE -ne 0 ] ; then
-    set +x
-    echo "* Heat stack:"
-    openstack stack show integration_test
-    echo "* Alarm list:"
-    ceilometer alarm-list
-    echo "* Nova instance list:"
-    openstack server list
-
-    echo "* Gnocchi instance list:"
-    gnocchi resource list -t instance
-    for instance_id in $(openstack server list -f value -c ID); do
-        echo "* Nova instance detail:"
-        openstack server show $instance_id
-        echo "* Gnocchi instance detail:"
-        gnocchi resource show -t instance $instance_id
-        echo "* Gnocchi measures for instance ${instance_id}:"
-        gnocchi measures show -r $instance_id cpu_util
-    done
-
-    gnocchi status
-
-    # Be sure to source Gnocchi settings before
-    source $BASE/new/gnocchi/devstack/settings
-    echo "* Unprocessed measures:"
-    sudo find $GNOCCHI_DATA_DIR/measure
-
-    set -x
-fi
-
-set -e
-
-# Collect and parse result
-if [ -n "$CEILOMETER_DIR" ]; then
-    generate_testr_results
-fi
-exit $EXIT_CODE
diff --git a/ceilometer/tests/pipeline_base.py b/ceilometer/tests/pipeline_base.py
deleted file mode 100644
index 6731cb7b..00000000
--- a/ceilometer/tests/pipeline_base.py
+++ /dev/null
@@ -1,2157 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2013 Intel Corp.
-#
-# Authors: Yunhong Jiang
-#          Julien Danjou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import copy -import datetime -import traceback - -import mock -from oslo_utils import timeutils -from oslotest import base -from oslotest import mockpatch -import six -from stevedore import extension - -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer import sample -from ceilometer import transformer -from ceilometer.transformer import accumulator -from ceilometer.transformer import arithmetic -from ceilometer.transformer import conversions - - -@six.add_metaclass(abc.ABCMeta) -class BasePipelineTestCase(base.BaseTestCase): - @staticmethod - def fake_tem_init(): - """Fake a transformerManager for pipeline. - - The faked entry point setting is below: - update: TransformerClass - except: TransformerClassException - drop: TransformerClassDrop - """ - pass - - def fake_tem_get_ext(self, name): - class_name_ext = { - 'update': self.TransformerClass, - 'except': self.TransformerClassException, - 'drop': self.TransformerClassDrop, - 'cache': accumulator.TransformerAccumulator, - 'aggregator': conversions.AggregatorTransformer, - 'unit_conversion': conversions.ScalingTransformer, - 'rate_of_change': conversions.RateOfChangeTransformer, - 'arithmetic': arithmetic.ArithmeticTransformer, - 'delta': conversions.DeltaTransformer, - } - - if name in class_name_ext: - return extension.Extension(name, None, - class_name_ext[name], - None, - ) - - raise KeyError(name) - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'except://': self.PublisherClassException} - return fake_drivers[url](url) - - class PublisherClassException(publisher.PublisherBase): - def publish_samples(self, samples): - raise Exception() - - def publish_events(self, events): - raise Exception() - - class TransformerClass(transformer.TransformerBase): - samples = [] - grouping_keys = ['counter_name'] - - def __init__(self, append_name='_update'): - self.__class__.samples = [] - self.append_name = append_name - - @staticmethod - def flush(): - return [] - - def handle_sample(self, counter): - self.__class__.samples.append(counter) - newname = getattr(counter, 'name') + self.append_name - return sample.Sample( - name=newname, - type=counter.type, - volume=counter.volume, - unit=counter.unit, - user_id=counter.user_id, - project_id=counter.project_id, - resource_id=counter.resource_id, - timestamp=counter.timestamp, - resource_metadata=counter.resource_metadata, - ) - - class TransformerClassDrop(transformer.TransformerBase): - samples = [] - grouping_keys = ['resource_id'] - - def __init__(self): - self.__class__.samples = [] - - def handle_sample(self, counter): - self.__class__.samples.append(counter) - - class TransformerClassException(object): - grouping_keys = ['resource_id'] - - @staticmethod - def handle_sample(counter): - raise Exception() - - def setUp(self): - super(BasePipelineTestCase, self).setUp() - - self.test_counter = sample.Sample( - name='a', - type=sample.TYPE_GAUGE, - volume=1, - unit='B', - 
user_id="test_user", - project_id="test_proj", - resource_id="test_resource", - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ) - - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - self.transformer_manager = mock.MagicMock() - self.transformer_manager.__getitem__.side_effect = \ - self.fake_tem_get_ext - - self._setup_pipeline_cfg() - - self._reraise_exception = True - self.useFixture(mockpatch.Patch( - 'ceilometer.pipeline.LOG.exception', - side_effect=self._handle_reraise_exception)) - - def _handle_reraise_exception(self, msg): - if self._reraise_exception: - raise Exception(traceback.format_exc()) - - @abc.abstractmethod - def _setup_pipeline_cfg(self): - """Setup the appropriate form of pipeline config.""" - - @abc.abstractmethod - def _augment_pipeline_cfg(self): - """Augment the pipeline config with an additional element.""" - - @abc.abstractmethod - def _break_pipeline_cfg(self): - """Break the pipeline config with a malformed element.""" - - @abc.abstractmethod - def _dup_pipeline_name_cfg(self): - """Break the pipeline config with duplicate pipeline name.""" - - @abc.abstractmethod - def _set_pipeline_cfg(self, field, value): - """Set a field to a value in the pipeline config.""" - - @abc.abstractmethod - def _extend_pipeline_cfg(self, field, value): - """Extend an existing field in the pipeline config with a value.""" - - @abc.abstractmethod - def _unset_pipeline_cfg(self, field): - """Clear an existing field in the pipeline config.""" - - def _exception_create_pipelinemanager(self): - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) - - def test_no_counters(self): - self._unset_pipeline_cfg('counters') - self._exception_create_pipelinemanager() - - def test_no_transformers(self): - self._unset_pipeline_cfg('transformers') - pipeline.PipelineManager(self.pipeline_cfg, self.transformer_manager) - - def test_no_name(self): - self._unset_pipeline_cfg('name') - self._exception_create_pipelinemanager() - - def test_no_interval(self): - self._unset_pipeline_cfg('interval') - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - pipe = pipeline_manager.pipelines[0] - self.assertEqual(600, pipe.get_interval()) - - def test_no_publishers(self): - self._unset_pipeline_cfg('publishers') - self._exception_create_pipelinemanager() - - def test_invalid_resources(self): - invalid_resource = {'invalid': 1} - self._set_pipeline_cfg('resources', invalid_resource) - self._exception_create_pipelinemanager() - - def test_check_counters_include_exclude_same(self): - counter_cfg = ['a', '!a'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_counters_include_exclude(self): - counter_cfg = ['a', '!b'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_counters_wildcard_included(self): - counter_cfg = ['a', '*'] - self._set_pipeline_cfg('counters', counter_cfg) - self._exception_create_pipelinemanager() - - def test_check_publishers_invalid_publisher(self): - publisher_cfg = ['test_invalid'] - self._set_pipeline_cfg('publishers', publisher_cfg) - - def test_invalid_string_interval(self): - self._set_pipeline_cfg('interval', 'string') - self._exception_create_pipelinemanager() - - def test_check_transformer_invalid_transformer(self): - transformer_cfg = [ - {'name': "test_invalid", - 
'parameters': {}} - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._exception_create_pipelinemanager() - - def test_get_interval(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - pipe = pipeline_manager.pipelines[0] - self.assertEqual(5, pipe.get_interval()) - - def test_publisher_transformer_invoked(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - - def test_multiple_included_counters(self): - counter_cfg = ['a', 'b'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('b_update', getattr(publisher.samples[1], "name")) - - @mock.patch('ceilometer.pipeline.LOG') - def test_none_volume_counter(self, LOG): - self._set_pipeline_cfg('counters', ['empty_volume']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - publisher = pipeline_manager.pipelines[0].publishers[0] - - test_s = sample.Sample( - name='empty_volume', - type=self.test_counter.type, - volume=None, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([test_s]) - - LOG.warning.assert_called_once_with( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has no volume (volume: %(counter_volume)s), the ' - 'sample will be dropped' - % {'counter_name': test_s.name, - 'resource_id': test_s.resource_id, - 'timestamp': test_s.timestamp, - 'counter_volume': test_s.volume}) - - self.assertEqual(0, len(publisher.samples)) - - @mock.patch('ceilometer.pipeline.LOG') - def test_fake_volume_counter(self, LOG): - self._set_pipeline_cfg('counters', ['fake_volume']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - publisher = pipeline_manager.pipelines[0].publishers[0] - - test_s = sample.Sample( - name='fake_volume', - type=self.test_counter.type, - volume='fake_value', - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - 
resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([test_s]) - - LOG.warning.assert_called_once_with( - 'metering data %(counter_name)s for %(resource_id)s ' - '@ %(timestamp)s has volume which is not a number ' - '(volume: %(counter_volume)s), the sample will be dropped' - % {'counter_name': test_s.name, - 'resource_id': test_s.resource_id, - 'timestamp': test_s.timestamp, - 'counter_volume': test_s.volume}) - - self.assertEqual(0, len(publisher.samples)) - - def test_counter_dont_match(self): - counter_cfg = ['nomatch'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - self.assertEqual(0, publisher.calls) - - def test_wildcard_counter(self): - counter_cfg = ['*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - - def test_wildcard_excluded_counters(self): - counter_cfg = ['*', '!a'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) - - def test_wildcard_excluded_counters_not_excluded(self): - counter_cfg = ['*', '!b'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - - def test_all_excluded_counters_not_excluded(self): - counter_cfg = ['!b', '!c'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - - def test_all_excluded_counters_is_excluded(self): - counter_cfg = ['!a', '!c'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('a')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('b')) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('c')) - - def test_wildcard_and_excluded_wildcard_counters(self): - counter_cfg = ['*', '!disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) - - def test_included_counter_and_wildcard_counters(self): - counter_cfg = ['cpu', 'disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertTrue(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertTrue(pipeline_manager.pipelines[0].support_meter('cpu')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('instance')) - - def test_excluded_counter_and_excluded_wildcard_counters(self): - counter_cfg = ['!cpu', '!disk.*'] - self._set_pipeline_cfg('counters', counter_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertFalse(pipeline_manager.pipelines[0]. - support_meter('disk.read.bytes')) - self.assertFalse(pipeline_manager.pipelines[0].support_meter('cpu')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_meter('instance')) - - def test_multiple_pipeline(self): - self._augment_pipeline_cfg() - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - new_publisher = pipeline_manager.pipelines[1].publishers[0] - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual(1, new_publisher.calls) - self.assertEqual('b_new', getattr(new_publisher.samples[0], "name")) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], "name")) - self.assertEqual('b', - getattr(self.TransformerClass.samples[1], "name")) - - def test_multiple_pipeline_exception(self): - self._reraise_exception = False - self._break_pipeline_cfg() - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, publisher.calls) - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - 
getattr(self.TransformerClass.samples[0], "name")) - self.assertEqual('b', - getattr(self.TransformerClass.samples[1], "name")) - - def test_none_transformer_pipeline(self): - self._set_pipeline_cfg('transformers', None) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.samples[0], 'name')) - - def test_empty_transformer_pipeline(self): - self._set_pipeline_cfg('transformers', []) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.samples[0], 'name')) - - def test_multiple_transformer_same_class(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, publisher.calls) - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update_update', - getattr(publisher.samples[0], 'name')) - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual('a_update', - getattr(self.TransformerClass.samples[1], 'name')) - - def test_multiple_transformer_same_class_different_parameter(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': - { - "append_name": "_update", - } - }, - { - 'name': 'update', - 'parameters': - { - "append_name": "_new", - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual('a_update', - getattr(self.TransformerClass.samples[1], 'name')) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, - len(publisher.samples)) - self.assertEqual('a_update_new', - getattr(publisher.samples[0], 'name')) - - def test_multiple_transformer_drop_transformer(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': - { - "append_name": "_update", - } - }, - { - 'name': 'drop', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': - { - "append_name": "_new", - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - self.assertEqual(1, len(self.TransformerClass.samples)) - self.assertEqual('a', - getattr(self.TransformerClass.samples[0], 'name')) - self.assertEqual(1, - 
len(self.TransformerClassDrop.samples)) - self.assertEqual('a_update', - getattr(self.TransformerClassDrop.samples[0], 'name')) - - def test_multiple_publisher(self): - self._set_pipeline_cfg('publishers', ['test://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual('a_update', - getattr(new_publisher.samples[0], 'name')) - self.assertEqual('a_update', - getattr(publisher.samples[0], 'name')) - - def test_multiple_publisher_isolation(self): - self._reraise_exception = False - self._set_pipeline_cfg('publishers', ['except://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(new_publisher.samples)) - self.assertEqual('a_update', - getattr(new_publisher.samples[0], 'name')) - - def test_multiple_counter_pipeline(self): - self._set_pipeline_cfg('counters', ['a', 'b']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter, - sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - )]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - self.assertEqual('a_update', getattr(publisher.samples[0], 'name')) - self.assertEqual('b_update', getattr(publisher.samples[1], 'name')) - - def test_flush_pipeline_cache(self): - CACHE_SIZE = 10 - extra_transformer_cfg = [ - { - 'name': 'cache', - 'parameters': { - 'size': CACHE_SIZE, - } - }, - { - 'name': 'update', - 'parameters': - { - 'append_name': '_new' - } - }, - ] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(self.test_counter) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - pipe.publish_data(self.test_counter) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - for i in range(CACHE_SIZE - 2): - pipe.publish_data(self.test_counter) - pipe.flush() - self.assertEqual(CACHE_SIZE, len(publisher.samples)) - self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name')) - - def test_flush_pipeline_cache_multiple_counter(self): - CACHE_SIZE = 3 - extra_transformer_cfg = [ - { - 'name': 'cache', - 'parameters': { - 'size': CACHE_SIZE - } - }, - { - 'name': 'update', - 'parameters': - { - 'append_name': '_new' - } - }, - ] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - self._set_pipeline_cfg('counters', ['a', 'b']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with 
pipeline_manager.publisher() as p: - p([self.test_counter, - sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - )]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(CACHE_SIZE, len(publisher.samples)) - self.assertEqual('a_update_new', - getattr(publisher.samples[0], 'name')) - self.assertEqual('b_update_new', - getattr(publisher.samples[1], 'name')) - - def test_flush_pipeline_cache_before_publisher(self): - extra_transformer_cfg = [{ - 'name': 'cache', - 'parameters': {} - }] - self._extend_pipeline_cfg('transformers', extra_transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - publisher = pipe.publishers[0] - pipe.publish_data(self.test_counter) - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - self.assertEqual('a_update', - getattr(publisher.samples[0], 'name')) - - def test_global_unit_conversion(self): - scale = 'volume / ((10**6) * 60)' - transformer_cfg = [ - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': scale}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=1200000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - cpu_mins = publisher.samples[-1] - self.assertEqual('cpu_mins', getattr(cpu_mins, 'name')) - self.assertEqual('min', getattr(cpu_mins, 'unit')) - self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type')) - self.assertEqual(20, getattr(cpu_mins, 'volume')) - - def test_unit_identified_source_unit_conversion(self): - transformer_cfg = [ - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {'unit': '°C'}, - 'target': {'unit': '°F', - 'scale': '(volume * 1.8) + 32'}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['core_temperature', - 'ambient_temperature']) - counters = [ - sample.Sample( - name='core_temperature', - type=sample.TYPE_GAUGE, - volume=36.0, - unit='°C', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - sample.Sample( - name='ambient_temperature', - type=sample.TYPE_GAUGE, - volume=88.8, - unit='°F', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={} - ), - ] - - pipeline_manager = 
pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - core_temp = publisher.samples[0] - self.assertEqual('core_temperature', getattr(core_temp, 'name')) - self.assertEqual('°F', getattr(core_temp, 'unit')) - self.assertEqual(96.8, getattr(core_temp, 'volume')) - amb_temp = publisher.samples[1] - self.assertEqual('ambient_temperature', getattr(amb_temp, 'name')) - self.assertEqual('°F', getattr(amb_temp, 'unit')) - self.assertEqual(88.8, getattr(amb_temp, 'volume')) - self.assertEqual(96.8, getattr(core_temp, 'volume')) - - def _do_test_rate_of_change_conversion(self, prev, curr, type, expected, - offset=1, weight=None): - s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)" - "* (resource_metadata.non.existent or 1.0)" - "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))") - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - now = timeutils.utcnow() - later = now + datetime.timedelta(minutes=offset) - um = {'autoscaling_weight': weight} if weight else {} - counters = [ - sample.Sample( - name='cpu', - type=type, - volume=prev, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=prev, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource2', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 2, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=curr, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 4, - 'user_metadata': um}, - ), - sample.Sample( - name='cpu', - type=type, - volume=curr, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource2', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 2, - 'user_metadata': um}, - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(2, len(publisher.samples)) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - cpu_util = publisher.samples[0] - self.assertEqual('cpu_util', getattr(cpu_util, 'name')) - self.assertEqual('test_resource', getattr(cpu_util, 'resource_id')) - self.assertEqual('%', getattr(cpu_util, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) - self.assertEqual(expected, getattr(cpu_util, 'volume')) - cpu_util = publisher.samples[1] - self.assertEqual('cpu_util', getattr(cpu_util, 'name')) - self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id')) - self.assertEqual('%', getattr(cpu_util, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type')) - self.assertEqual(expected * 2, getattr(cpu_util, 'volume')) - - def test_rate_of_change_conversion(self): - 
self._do_test_rate_of_change_conversion(120000000000, - 180000000000, - sample.TYPE_CUMULATIVE, - 25.0) - - def test_rate_of_change_conversion_weight(self): - self._do_test_rate_of_change_conversion(120000000000, - 180000000000, - sample.TYPE_CUMULATIVE, - 27.5, - weight=1.1) - - def test_rate_of_change_conversion_negative_cumulative_delta(self): - self._do_test_rate_of_change_conversion(180000000000, - 120000000000, - sample.TYPE_CUMULATIVE, - 50.0) - - def test_rate_of_change_conversion_negative_gauge_delta(self): - self._do_test_rate_of_change_conversion(180000000000, - 120000000000, - sample.TYPE_GAUGE, - -25.0) - - def test_rate_of_change_conversion_zero_delay(self): - self._do_test_rate_of_change_conversion(120000000000, - 120000000000, - sample.TYPE_CUMULATIVE, - 0.0, - offset=0) - - def test_rate_of_change_no_predecessor(self): - s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - now = timeutils.utcnow() - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=120000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(0, len(publisher.samples)) - - @mock.patch('ceilometer.transformer.conversions.LOG') - def test_rate_of_change_out_of_order(self, the_log): - s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_util', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'scale': s} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - now = timeutils.utcnow() - earlier = now - datetime.timedelta(seconds=10) - later = now + datetime.timedelta(seconds=10) - - counters = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=125000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=now.isoformat(), - resource_metadata={'cpu_number': 4} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=120000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=earlier.isoformat(), - resource_metadata={'cpu_number': 4} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=130000000000, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=later.isoformat(), - resource_metadata={'cpu_number': 4} - ), - ] - - pipe.publish_data(counters) - publisher = pipe.publishers[0] - self.assertEqual(1, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - - cpu_util_sample = publisher.samples[0] - 
self.assertEqual(12.5, cpu_util_sample.volume) - the_log.warning.assert_called_with( - 'dropping out of time order sample: %s', - (counters[1],) - ) - - def test_resources(self): - resources = ['test1://', 'test2://'] - self._set_pipeline_cfg('resources', resources) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(resources, - pipeline_manager.pipelines[0].resources) - - def test_no_resources(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(0, len(pipeline_manager.pipelines[0].resources)) - - def _do_test_rate_of_change_mapping(self, pipe, meters, units): - now = timeutils.utcnow() - base = 1000 - offset = 7 - rate = 42 - later = now + datetime.timedelta(minutes=offset) - counters = [] - for v, ts in [(base, now.isoformat()), - (base + (offset * 60 * rate), later.isoformat())]: - for n, u, r in [(meters[0], units[0], 'resource1'), - (meters[1], units[1], 'resource2')]: - s = sample.Sample( - name=n, - type=sample.TYPE_CUMULATIVE, - volume=v, - unit=u, - user_id='test_user', - project_id='test_proj', - resource_id=r, - timestamp=ts, - resource_metadata={}, - ) - counters.append(s) - - pipe.publish_data(counters) - publisher = pipe.publishers[0] - self.assertEqual(2, len(publisher.samples)) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - bps = publisher.samples[0] - self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name')) - self.assertEqual('resource1', getattr(bps, 'resource_id')) - self.assertEqual('%s/s' % units[0], getattr(bps, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type')) - self.assertEqual(rate, getattr(bps, 'volume')) - rps = publisher.samples[1] - self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name')) - self.assertEqual('resource2', getattr(rps, 'resource_id')) - self.assertEqual('%s/s' % units[1], getattr(rps, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type')) - self.assertEqual(rate, getattr(rps, 'volume')) - - def test_rate_of_change_mapping(self): - map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)', - 'unit': '(B|request)'} - map_to = {'name': 'disk.\\1.\\2.rate', - 'unit': '\\1/s'} - transformer_cfg = [ - { - 'name': 'rate_of_change', - 'parameters': { - 'source': { - 'map_from': map_from - }, - 'target': { - 'map_to': map_to, - 'type': sample.TYPE_GAUGE - }, - }, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['disk.read.bytes', - 'disk.write.requests']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - meters = ('disk.read.bytes', 'disk.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_mapping(pipe, meters, units) - - def _do_test_aggregator(self, parameters, expected_length): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': parameters, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id='test_user', 
- project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=53, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=42, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=15, - unit='B', - user_id='test_user', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=2, - unit='B', - user_id='test_user_bis', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '3.0'} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(expected_length, len(publisher.samples)) - return sorted(publisher.samples, key=lambda s: s.volume) - - def test_aggregator_meter_type(self): - volumes = [1.0, 2.0, 3.0] - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': len(volumes) * len(sample.TYPES)} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', - ['testgauge', 'testcumulative', 'testdelta']) - counters = [] - for sample_type in sample.TYPES: - for volume in volumes: - counters.append(sample.Sample( - name='test' + sample_type, - type=sample_type, - volume=volume, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - )) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - actual = sorted(s.volume for s in publisher.samples) - self.assertEqual([2.0, 3.0, 6.0], actual) - - def test_aggregator_metadata(self): - for conf, expected_version in [('last', '2.0'), ('first', '1.0')]: - samples = self._do_test_aggregator({ - 'resource_metadata': conf, - 'target': {'name': 'aggregated-bytes'} - }, expected_length=4) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(2, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(15, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - s = samples[2] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(42, s.volume) - self.assertEqual('test_user', s.user_id) - 
self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': expected_version}, - s.resource_metadata) - s = samples[3] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(95, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': expected_version}, - s.resource_metadata) - - def test_aggregator_user_last_and_metadata_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=2) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(44, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(110, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - - def test_aggregator_user_first_and_metadata_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'first', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=2) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(44, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - s = samples[1] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(110, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj_bis', s.project_id) - self.assertEqual({'version': '2.0'}, - s.resource_metadata) - - def test_aggregator_all_first(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'first', - 'user_id': 'first', - 'project_id': 'first', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '1.0'}, - s.resource_metadata) - - def test_aggregator_all_last(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'last', - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - def test_aggregator_all_mixed(self): - samples = self._do_test_aggregator({ - 'resource_metadata': 'drop', - 'user_id': 'first', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({}, s.resource_metadata) - - def test_aggregator_metadata_default(self): - samples = self._do_test_aggregator({ - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - 
self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - @mock.patch('ceilometer.transformer.conversions.LOG') - def test_aggregator_metadata_invalid(self, mylog): - samples = self._do_test_aggregator({ - 'resource_metadata': 'invalid', - 'user_id': 'last', - 'project_id': 'last', - 'target': {'name': 'aggregated-bytes'} - }, expected_length=1) - s = samples[0] - self.assertTrue(mylog.warning.called) - self.assertEqual('aggregated-bytes', s.name) - self.assertEqual(154, s.volume) - self.assertEqual('test_user_bis', s.user_id) - self.assertEqual('test_proj', s.project_id) - self.assertEqual({'version': '3.0'}, - s.resource_metadata) - - def test_aggregator_sized_flush(self): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 2}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ) - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counters[0]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(0, len(publisher.samples)) - - pipe.publish_data([counters[1]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(2, len(publisher.samples)) - - def test_aggregator_timed_flush(self): - timeutils.set_time_override() - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 900, 'retention_time': 60}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes']) - counters = [ - sample.Sample( - name='storage.objects.incoming.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(counters) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - timeutils.advance_time_seconds(120) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.samples)) - - def test_aggregator_without_authentication(self): - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 2}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['storage.objects.outgoing.bytes']) - counters = [ - sample.Sample( - name='storage.objects.outgoing.bytes', - type=sample.TYPE_DELTA, - volume=26, - unit='B', - user_id=None, - project_id=None, - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - 
resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='storage.objects.outgoing.bytes', - type=sample.TYPE_DELTA, - volume=16, - unit='B', - user_id=None, - project_id=None, - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ) - ] - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counters[0]]) - pipe.flush() - publisher = pipe.publishers[0] - self.assertEqual(0, len(publisher.samples)) - - pipe.publish_data([counters[1]]) - pipe.flush() - publisher = pipe.publishers[0] - - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(42, getattr(publisher.samples[0], 'volume')) - self.assertEqual("test_resource", getattr(publisher.samples[0], - 'resource_id')) - - def test_aggregator_to_rate_of_change_transformer_two_resources(self): - resource_id = ['1ca738a1-c49c-4401-8346-5c60ebdb03f4', - '5dd418a6-c6a9-49c9-9cef-b357d72c71dd'] - - aggregator = conversions.AggregatorTransformer(size="2", - timestamp="last") - - rate_of_change_transformer = conversions.RateOfChangeTransformer() - - counter_time = timeutils.parse_isotime('2016-01-01T12:00:00+00:00') - - for offset in range(2): - counter = copy.copy(self.test_counter) - counter.timestamp = timeutils.isotime(counter_time) - counter.resource_id = resource_id[0] - counter.volume = offset - counter.type = sample.TYPE_CUMULATIVE - counter.unit = 'ns' - aggregator.handle_sample(counter) - - if offset == 1: - test_time = counter_time - - counter_time = counter_time + datetime.timedelta(0, 1) - - aggregated_counters = aggregator.flush() - self.assertEqual(len(aggregated_counters), 1) - self.assertEqual(aggregated_counters[0].timestamp, - timeutils.isotime(test_time)) - - rate_of_change_transformer.handle_sample(aggregated_counters[0]) - - for offset in range(2): - counter = copy.copy(self.test_counter) - counter.timestamp = timeutils.isotime(counter_time) - counter.resource_id = resource_id[offset] - counter.volume = 2 - counter.type = sample.TYPE_CUMULATIVE - counter.unit = 'ns' - aggregator.handle_sample(counter) - - if offset == 0: - test_time = counter_time - - counter_time = counter_time + datetime.timedelta(0, 1) - - aggregated_counters = aggregator.flush() - self.assertEqual(len(aggregated_counters), 2) - - for counter in aggregated_counters: - if counter.resource_id == resource_id[0]: - rateOfChange = rate_of_change_transformer.handle_sample( - counter) - self.assertEqual(counter.timestamp, - timeutils.isotime(test_time)) - - self.assertEqual(rateOfChange.volume, 1) - - def _do_test_arithmetic_expr_parse(self, expr, expected): - actual = arithmetic.ArithmeticTransformer.parse_expr(expr) - self.assertEqual(expected, actual) - - def test_arithmetic_expr_parse(self): - expr = '$(cpu) + $(cpu.util)' - expected = ('cpu.volume + _cpu_util_ESC.volume', - { - 'cpu': 'cpu', - 'cpu.util': '_cpu_util_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_parameter(self): - expr = '$(cpu) + $(cpu.util).resource_metadata' - expected = ('cpu.volume + _cpu_util_ESC.resource_metadata', - { - 'cpu': 'cpu', - 'cpu.util': '_cpu_util_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_reserved_keyword(self): - expr = '$(class) + $(cpu.util)' - expected = ('_class_ESC.volume + _cpu_util_ESC.volume', - { - 'class': '_class_ESC', - 'cpu.util': '_cpu_util_ESC' - }) - 
self._do_test_arithmetic_expr_parse(expr, expected) - - def test_arithmetic_expr_parse_already_escaped(self): - expr = '$(class) + $(_class_ESC)' - expected = ('_class_ESC.volume + __class_ESC_ESC.volume', - { - 'class': '_class_ESC', - '_class_ESC': '__class_ESC_ESC' - }) - self._do_test_arithmetic_expr_parse(expr, expected) - - def _do_test_arithmetic(self, expression, scenario, expected): - transformer_cfg = [ - { - 'name': 'arithmetic', - 'parameters': { - 'target': {'name': 'new_meter', - 'unit': '%', - 'type': sample.TYPE_GAUGE, - 'expr': expression}, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', - list(set(s['name'] for s in scenario))) - counters = [] - test_resources = ['test_resource1', 'test_resource2'] - for resource_id in test_resources: - for s in scenario: - counters.append(sample.Sample( - name=s['name'], - type=sample.TYPE_CUMULATIVE, - volume=s['volume'], - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id=resource_id, - timestamp=timeutils.utcnow().isoformat(), - resource_metadata=s.get('metadata') - )) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - for s in counters: - pipe.publish_data(s) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - expected_len = len(test_resources) * len(expected) - self.assertEqual(expected_len, len(publisher.samples)) - - # bucket samples by resource first - samples_by_resource = dict((r, []) for r in test_resources) - for s in publisher.samples: - samples_by_resource[s.resource_id].append(s) - - for resource_id in samples_by_resource: - self.assertEqual(len(expected), - len(samples_by_resource[resource_id])) - for i, s in enumerate(samples_by_resource[resource_id]): - self.assertEqual('new_meter', getattr(s, 'name')) - self.assertEqual(resource_id, getattr(s, 'resource_id')) - self.assertEqual('%', getattr(s, 'unit')) - self.assertEqual(sample.TYPE_GAUGE, getattr(s, 'type')) - self.assertEqual(expected[i], getattr(s, 'volume')) - - def test_arithmetic_transformer(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [50.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_expr_empty(self): - expression = '' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_expr_misconfigured(self): - expression = '512.0 * 3' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_nan(self): - expression = 'float(\'nan\') * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_exception(self): - expression = '$(memory) / 0' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - ] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_multiple_samples(self): - expression = '100.0 * $(memory.usage) / $(memory)' - 
scenario = [ - dict(name='memory', volume=2048.0), - dict(name='memory.usage', volume=512.0), - dict(name='memory', volume=1024.0), - ] - expected = [25.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_missing(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [dict(name='memory.usage', volume=512.0)] - expected = [] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_more_than_needed(self): - expression = '100.0 * $(memory.usage) / $(memory)' - scenario = [ - dict(name='memory', volume=1024.0), - dict(name='memory.usage', volume=512.0), - dict(name='cpu_util', volume=90.0), - ] - expected = [50.0] - self._do_test_arithmetic(expression, scenario, expected) - - def test_arithmetic_transformer_cache_cleared(self): - transformer_cfg = [ - { - 'name': 'arithmetic', - 'parameters': { - 'target': {'name': 'new_meter', - 'expr': '$(memory.usage) + 2'} - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['memory.usage']) - counter = sample.Sample( - name='memory.usage', - type=sample.TYPE_GAUGE, - volume=1024.0, - unit='MB', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata=None - ) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data([counter]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - self.assertEqual(1026.0, publisher.samples[0].volume) - - pipe.flush() - self.assertEqual(1, len(publisher.samples)) - - counter.volume = 2048.0 - pipe.publish_data([counter]) - pipe.flush() - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2050.0, publisher.samples[1].volume) - - def test_aggregator_timed_flush_no_matching_samples(self): - timeutils.set_time_override() - transformer_cfg = [ - { - 'name': 'aggregator', - 'parameters': {'size': 900, 'retention_time': 60}, - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['unrelated-sample']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - timeutils.advance_time_seconds(200) - pipe = pipeline_manager.pipelines[0] - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.samples)) - - def _do_test_delta(self, data, expected, growth_only=False): - transformer_cfg = [ - { - 'name': 'delta', - 'parameters': { - 'target': {'name': 'new_meter'}, - 'growth_only': growth_only, - } - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - self._set_pipeline_cfg('counters', ['cpu']) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[0] - - pipe.publish_data(data) - pipe.flush() - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(expected, len(publisher.samples)) - return publisher.samples - - def test_delta_transformer(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - 
name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 2) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '2.0'}, deltas[0].resource_metadata) - self.assertEqual(-10, deltas[0].volume) - self.assertEqual('new_meter', deltas[1].name) - self.assertEqual('delta', deltas[1].type) - self.assertEqual('ns', deltas[1].unit) - self.assertEqual({'version': '1.0'}, deltas[1].resource_metadata) - self.assertEqual(37, deltas[1].volume) - - def test_delta_transformer_out_of_order(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=((timeutils.utcnow() - datetime.timedelta(minutes=5)) - .isoformat()), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 1) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) - self.assertEqual(27, deltas[0].volume) - - def test_delta_transformer_growth_only(self): - samples = [ - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=26, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=16, - unit='ns', - user_id='test_user', - project_id='test_proj', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '2.0'} - ), - sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - volume=53, - unit='ns', - user_id='test_user_bis', - project_id='test_proj_bis', - resource_id='test_resource', - timestamp=timeutils.utcnow().isoformat(), - resource_metadata={'version': '1.0'} - ), - ] - deltas = self._do_test_delta(samples, 1, True) - self.assertEqual('new_meter', deltas[0].name) - self.assertEqual('delta', deltas[0].type) - self.assertEqual('ns', deltas[0].unit) - self.assertEqual({'version': '1.0'}, deltas[0].resource_metadata) - self.assertEqual(37, deltas[0].volume) - - def test_unique_pipeline_names(self): - self._dup_pipeline_name_cfg() - self._exception_create_pipelinemanager() - - def test_get_pipeline_grouping_key(self): - transformer_cfg = [ - { - 'name': 'update', - 
'parameters': {} - }, - { - 'name': 'unit_conversion', - 'parameters': { - 'source': {}, - 'target': {'name': 'cpu_mins', - 'unit': 'min', - 'scale': 'volume'}, - } - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(set(['resource_id', 'counter_name']), - set(pipeline.get_pipeline_grouping_key( - pipeline_manager.pipelines[0]))) - - def test_get_pipeline_duplicate_grouping_key(self): - transformer_cfg = [ - { - 'name': 'update', - 'parameters': {} - }, - { - 'name': 'update', - 'parameters': {} - }, - ] - self._set_pipeline_cfg('transformers', transformer_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - self.assertEqual(['counter_name'], - pipeline.get_pipeline_grouping_key( - pipeline_manager.pipelines[0])) diff --git a/ceilometer/tests/tempest/__init__.py b/ceilometer/tests/tempest/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/api/__init__.py b/ceilometer/tests/tempest/api/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/api/base.py b/ceilometer/tests/tempest/api/base.py deleted file mode 100644 index 81c53d64..00000000 --- a/ceilometer/tests/tempest/api/base.py +++ /dev/null @@ -1,162 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
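The two grouping-key tests just above pin down a simple merge rule: each transformer in a pipeline may declare grouping keys, duplicates collapse, and 'counter_name' is always appended. A minimal sketch of that rule (the _Transformer stand-in and the flat argument are illustrative; the removed pipeline module takes a full pipeline object):

    class _Transformer(object):
        # Stand-in for a transformer; real ones declare grouping_keys
        # as a class attribute (e.g. ['resource_id'] for scaling).
        def __init__(self, grouping_keys=None):
            self.grouping_keys = grouping_keys or []


    def get_pipeline_grouping_key(transformers):
        # Union of all declared keys, de-duplicated, plus the implicit
        # 'counter_name' every pipeline groups on.
        keys = set()
        for transformer in transformers:
            keys.update(transformer.grouping_keys)
        return sorted(keys) + ['counter_name']


    # Mirrors the two assertions above.
    assert get_pipeline_grouping_key(
        [_Transformer(['resource_id']), _Transformer()]) == \
        ['resource_id', 'counter_name']
    assert get_pipeline_grouping_key(
        [_Transformer(), _Transformer()]) == ['counter_name']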
- -import time - -from oslo_utils import timeutils -from tempest.common import compute -from tempest.common.utils import data_utils -from tempest import config -from tempest import exceptions -from tempest.lib import exceptions as lib_exc -import tempest.test - -from ceilometer.tests.tempest.service import client - - -CONF = config.CONF - - -class ClientManager(client.Manager): - - load_clients = [ - 'servers_client', - 'compute_networks_client', - 'compute_floating_ips_client', - 'flavors_client', - 'image_client', - 'image_client_v2', - 'telemetry_client', - ] - - -class BaseTelemetryTest(tempest.test.BaseTestCase): - - """Base test case class for all Telemetry API tests.""" - - credentials = ['primary'] - client_manager = ClientManager - - @classmethod - def skip_checks(cls): - super(BaseTelemetryTest, cls).skip_checks() - if not CONF.service_available.ceilometer: - raise cls.skipException("Ceilometer support is required") - - @classmethod - def setup_credentials(cls): - cls.set_network_resources() - super(BaseTelemetryTest, cls).setup_credentials() - - @classmethod - def setup_clients(cls): - super(BaseTelemetryTest, cls).setup_clients() - cls.telemetry_client = cls.os_primary.telemetry_client - cls.servers_client = cls.os_primary.servers_client - cls.flavors_client = cls.os_primary.flavors_client - cls.image_client = cls.os_primary.image_client - cls.image_client_v2 = cls.os_primary.image_client_v2 - - @classmethod - def resource_setup(cls): - super(BaseTelemetryTest, cls).resource_setup() - cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size', - 'disk.ephemeral.size'] - - cls.glance_notifications = ['image.size'] - - cls.glance_v2_notifications = ['image.download', 'image.serve'] - - cls.server_ids = [] - cls.image_ids = [] - - @classmethod - def create_server(cls): - tenant_network = cls.get_tenant_network() - body, server = compute.create_test_server( - cls.os_primary, - tenant_network=tenant_network, - name=data_utils.rand_name('ceilometer-instance'), - wait_until='ACTIVE') - cls.server_ids.append(body['id']) - return body - - @classmethod - def create_image(cls, client, **kwargs): - body = client.create_image(name=data_utils.rand_name('image'), - container_format='bare', - disk_format='raw', - **kwargs) - # TODO(jswarren) Move ['image'] up to initial body value assignment - # once both v1 and v2 glance clients include the full response - # object. - if 'image' in body: - body = body['image'] - cls.image_ids.append(body['id']) - return body - - @staticmethod - def cleanup_resources(method, list_of_ids): - for resource_id in list_of_ids: - try: - method(resource_id) - except lib_exc.NotFound: - pass - - @classmethod - def resource_cleanup(cls): - cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids) - cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids) - super(BaseTelemetryTest, cls).resource_cleanup() - - def await_samples(self, metric, query): - """Wait for a sample matching the query to be added to the database. 
- - Samples can take a long time to appear when PostgreSQL (or MySQL) - is used as the ceilometer backend. - """ - timeout = CONF.compute.build_timeout - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: - body = self.telemetry_client.list_samples(metric, query) - if body: - return body - time.sleep(CONF.compute.build_interval) - - raise exceptions.TimeoutException( - 'Sample for metric:%s with query:%s has not been added to the ' - 'database within %d seconds' % (metric, query, - CONF.compute.build_timeout)) - - -class BaseTelemetryAdminTest(BaseTelemetryTest): - """Base test case class for admin Telemetry API tests.""" - - credentials = ['primary', 'admin'] - - @classmethod - def setup_clients(cls): - super(BaseTelemetryAdminTest, cls).setup_clients() - cls.telemetry_admin_client = cls.os_adm.telemetry_client - - def await_events(self, query): - timeout = CONF.compute.build_timeout - start = timeutils.utcnow() - while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout: - body = self.telemetry_admin_client.list_events(query) - if body: - return body - time.sleep(CONF.compute.build_interval) - - raise exceptions.TimeoutException( - 'Event with query:%s has not been added to the ' - 'database within %d seconds' % (query, CONF.compute.build_timeout)) diff --git a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py b/ceilometer/tests/tempest/api/test_telemetry_notification_api.py deleted file mode 100644 index d723b558..00000000 --- a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
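Both wait helpers above share one poll-until-timeout shape. A condensed sketch of that pattern, assuming plain wall-clock time instead of oslo_utils.timeutils and with a hypothetical fetch callable standing in for the client call:

    import time


    def wait_for(fetch, timeout, interval):
        # Poll fetch() until it returns a non-empty result or the
        # timeout expires; None signals a timeout to the caller, which
        # would then raise, as await_samples/await_events do above.
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = fetch()
            if result:
                return result
            time.sleep(interval)
        return None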
- -# Change-Id: I14e16a1a7d9813b324ee40545c07f0e88fb637b7 - -import testtools - -from ceilometer.tests.tempest.api import base -from tempest import config -from tempest.lib import decorators -from tempest import test - - -CONF = config.CONF - - -class TelemetryNotificationAPITest(base.BaseTelemetryTest): - - @test.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caae') - @test.services('compute') - def test_check_nova_notification(self): - - body = self.create_server() - - query = ('resource', 'eq', body['id']) - - for metric in self.nova_notifications: - self.await_samples(metric, query) - - @test.attr(type="smoke") - @test.idempotent_id('04b10bfe-a5dc-47af-b22f-0460426bf499') - @test.services("image") - @testtools.skipIf(not CONF.image_feature_enabled.api_v1, - "Glance api v1 is disabled") - def test_check_glance_v1_notifications(self): - body = self.create_image(self.image_client, is_public=False) - self.image_client.update_image(body['id'], data='data') - - query = 'resource', 'eq', body['id'] - - self.image_client.delete_image(body['id']) - - for metric in self.glance_notifications: - self.await_samples(metric, query) - - @test.attr(type="smoke") - @test.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8f') - @test.services("image") - @testtools.skipIf(not CONF.image_feature_enabled.api_v2, - "Glance api v2 is disabled") - def test_check_glance_v2_notifications(self): - body = self.create_image(self.image_client_v2, visibility='private') - - self.image_client_v2.store_image_file(body['id'], "file") - self.image_client_v2.show_image_file(body['id']) - - query = 'resource', 'eq', body['id'] - - for metric in self.glance_v2_notifications: - self.await_samples(metric, query) - - -class TelemetryNotificationAdminAPITest(base.BaseTelemetryAdminTest): - - @test.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebea') - @test.services('compute') - @decorators.skip_because(bug='1480490') - def test_check_nova_notification_event_and_meter(self): - - body = self.create_server() - - if CONF.telemetry.event_enabled: - query = ('instance_id', 'eq', body['id']) - self.await_events(query) - - query = ('resource', 'eq', body['id']) - for metric in self.nova_notifications: - self.await_samples(metric, query) diff --git a/ceilometer/tests/tempest/config.py b/ceilometer/tests/tempest/config.py deleted file mode 100644 index dea33f6c..00000000 --- a/ceilometer/tests/tempest/config.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2012 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
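The tests above pass queries around as (field, op, value) triples. A small sketch of how such a triple becomes the q.* request parameters the v2 API expects, mirroring the _helper_list expansion in the service client removed further down in this patch (the function name is illustrative):

    from six.moves.urllib import parse as urllib


    def query_to_params(query=None, period=None):
        # ('resource', 'eq', <id>) -> q.field/q.op/q.value pairs.
        params = {}
        if query:
            params = {'q.field': query[0], 'q.op': query[1],
                      'q.value': query[2]}
        if period:
            params['period'] = period
        return urllib.urlencode(params)

For example, query_to_params(('resource', 'eq', 'abc-123')) yields the three q.* pairs used by the sample listings above.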
- -from oslo_config import cfg - - -service_available_group = cfg.OptGroup(name="service_available", - title="Available OpenStack Services") - -ServiceAvailableGroup = [ - cfg.BoolOpt('ceilometer', - default=True, - help="Whether or not Ceilometer is expected to be available"), -] - -telemetry_group = cfg.OptGroup(name='telemetry', - title='Telemetry Service Options') - -TelemetryGroup = [ - cfg.StrOpt('catalog_type', - default='metering', - help="Catalog type of the Telemetry service."), - cfg.StrOpt('endpoint_type', - default='publicURL', - choices=['public', 'admin', 'internal', - 'publicURL', 'adminURL', 'internalURL'], - help="The endpoint type to use for the telemetry service."), - cfg.BoolOpt('event_enabled', - default=True, - help="Runs Ceilometer event-related tests"), -] diff --git a/ceilometer/tests/tempest/plugin.py b/ceilometer/tests/tempest/plugin.py deleted file mode 100644 index 077a3c20..00000000 --- a/ceilometer/tests/tempest/plugin.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright 2015 NEC Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from tempest import config -from tempest.test_discover import plugins - -import ceilometer -from ceilometer.tests.tempest import config as tempest_config - - -class CeilometerTempestPlugin(plugins.TempestPlugin): - - def load_tests(self): - base_path = os.path.split(os.path.dirname( - os.path.abspath(ceilometer.__file__)))[0] - test_dir = "ceilometer/tests/tempest" - full_test_dir = os.path.join(base_path, test_dir) - return full_test_dir, base_path - - def register_opts(self, conf): - config.register_opt_group(conf, - tempest_config.service_available_group, - tempest_config.ServiceAvailableGroup) - config.register_opt_group(conf, - tempest_config.telemetry_group, - tempest_config.TelemetryGroup) - - def get_opt_lists(self): - return [(tempest_config.telemetry_group.name, - tempest_config.TelemetryGroup)] diff --git a/ceilometer/tests/tempest/scenario/__init__.py b/ceilometer/tests/tempest/scenario/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py b/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py deleted file mode 100644 index 0d99ee35..00000000 --- a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2014 Red Hat -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
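The option groups above only become readable as CONF.telemetry.* once the plugin registers them, which is what plugin.py does via config.register_opt_group. A self-contained sketch of the same mechanism with bare oslo.config (a fresh ConfigOpts instead of tempest's CONF):

    from oslo_config import cfg


    def register_telemetry_opts(conf):
        group = cfg.OptGroup(name='telemetry',
                             title='Telemetry Service Options')
        conf.register_group(group)
        conf.register_opts(
            [cfg.BoolOpt('event_enabled', default=True,
                         help="Runs Ceilometer event-related tests")],
            group=group)


    conf = cfg.ConfigOpts()
    register_telemetry_opts(conf)
    conf([])  # parse an empty command line so defaults are readable
    assert conf.telemetry.event_enabled  # default is True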
- -from oslo_log import log as logging -from tempest.common.utils import data_utils -from tempest import config -from tempest import test - -from ceilometer.tests.tempest.service import client - - -CONF = config.CONF - -LOG = logging.getLogger(__name__) - -# Loop for up to 120 seconds waiting on notifications -# NOTE(chdent): The choice of 120 seconds is fairly -# arbitrary: Long enough to give the notifications the -# chance to travel across a highly latent bus but not -# so long as to allow excessive latency to never be visible. -# TODO(chdent): Ideally this value would come from configuration. -NOTIFICATIONS_WAIT = 120 -NOTIFICATIONS_SLEEP = 1 - - -class ClientManager(client.Manager): - - load_clients = [ - 'telemetry_client', - 'container_client', - 'object_client', - ] - - -class TestObjectStorageTelemetry(test.BaseTestCase): - """Test that swift uses the ceilometer middleware. - - * create container. - * upload a file to the created container. - * retrieve the file from the created container. - * wait for notifications from ceilometer. - """ - - credentials = ['primary'] - client_manager = ClientManager - - @classmethod - def skip_checks(cls): - super(TestObjectStorageTelemetry, cls).skip_checks() - if not CONF.service_available.swift: - skip_msg = ("%s skipped as swift is not available" % - cls.__name__) - raise cls.skipException(skip_msg) - if not CONF.service_available.ceilometer: - skip_msg = ("%s skipped as ceilometer is not available" % - cls.__name__) - raise cls.skipException(skip_msg) - - @classmethod - def setup_credentials(cls): - cls.set_network_resources() - super(TestObjectStorageTelemetry, cls).setup_credentials() - - @classmethod - def setup_clients(cls): - super(TestObjectStorageTelemetry, cls).setup_clients() - cls.telemetry_client = cls.os_primary.telemetry_client - cls.container_client = cls.os_primary.container_client - cls.object_client = cls.os_primary.object_client - - def _confirm_notifications(self, container_name, obj_name): - # NOTE: Loop waiting for the expected notifications about the - # containers and objects sent to swift. - - def _check_samples(): - # NOTE: Return True only if we have notifications about some - # containers and some objects and the notifications are about - # the expected containers and objects. - # Otherwise returning False will cause _check_samples to be - # called again. - results = self.telemetry_client.list_samples( - 'storage.objects.incoming.bytes') - LOG.debug('got samples %s', results) - - # Extract container info from samples. - containers, objects = [], [] - for sample in results: - meta = sample['resource_metadata'] - if meta.get('container') and meta['container'] != 'None': - containers.append(meta['container']) - elif (meta.get('target.metadata:container') and - meta['target.metadata:container'] != 'None'): - containers.append(meta['target.metadata:container']) - - if meta.get('object') and meta['object'] != 'None': - objects.append(meta['object']) - elif (meta.get('target.metadata:object') and - meta['target.metadata:object'] != 'None'): - objects.append(meta['target.metadata:object']) - - return (container_name in containers and obj_name in objects) - - self.assertTrue(test.call_until_true(_check_samples, - NOTIFICATIONS_WAIT, - NOTIFICATIONS_SLEEP), - 'Correct notifications were not received after ' - '%s seconds.' 
% NOTIFICATIONS_WAIT) - - def create_container(self): - name = data_utils.rand_name('swift-scenario-container') - self.container_client.create_container(name) - # list the container contents to ensure it was created - self.container_client.list_container_contents(name) - LOG.debug('Container %s created', name) - self.addCleanup(self.container_client.delete_container, - name) - return name - - def upload_object_to_container(self, container_name): - obj_name = data_utils.rand_name('swift-scenario-object') - obj_data = data_utils.arbitrary_string() - self.object_client.create_object(container_name, obj_name, obj_data) - self.addCleanup(self.object_client.delete_object, - container_name, - obj_name) - return obj_name - - @test.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb85') - @test.services('object_storage') - def test_swift_middleware_notifies(self): - container_name = self.create_container() - obj_name = self.upload_object_to_container(container_name) - self._confirm_notifications(container_name, obj_name) diff --git a/ceilometer/tests/tempest/service/__init__.py b/ceilometer/tests/tempest/service/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/tempest/service/client.py b/ceilometer/tests/tempest/service/client.py deleted file mode 100644 index 179f8a1a..00000000 --- a/ceilometer/tests/tempest/service/client.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2014 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
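_check_samples above accepts container/object names under either the plain metadata keys or the 'target.metadata:'-prefixed variants, skipping literal 'None' strings. The same lookup, factored into a standalone sketch (the helper name is hypothetical):

    def extract_container_and_object(sample):
        meta = sample['resource_metadata']

        def _pick(plain, prefixed):
            # First non-empty, non-'None' value wins.
            for key in (plain, prefixed):
                value = meta.get(key)
                if value and value != 'None':
                    return value

        return (_pick('container', 'target.metadata:container'),
                _pick('object', 'target.metadata:object'))


    assert extract_container_and_object(
        {'resource_metadata': {'target.metadata:container': 'c1',
                               'object': 'o1'}}) == ('c1', 'o1')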
- -from oslo_serialization import jsonutils as json -from six.moves.urllib import parse as urllib - -from tempest import config -from tempest.lib.common import rest_client -from tempest.lib.services.compute.flavors_client import FlavorsClient -from tempest.lib.services.compute.floating_ips_client import FloatingIPsClient -from tempest.lib.services.compute.networks_client import NetworksClient -from tempest.lib.services.compute.servers_client import ServersClient -from tempest import manager -from tempest.services.object_storage.container_client import ContainerClient -from tempest.services.object_storage.object_client import ObjectClient - -from ceilometer.tests.tempest.service.images.v1.images_client import \ - ImagesClient -from ceilometer.tests.tempest.service.images.v2.images_client import \ - ImagesClient as ImagesClientV2 - - -CONF = config.CONF - - -class TelemetryClient(rest_client.RestClient): - - version = '2' - uri_prefix = "v2" - - def deserialize(self, body): - return json.loads(body.replace("\n", "")) - - def serialize(self, body): - return json.dumps(body) - - def create_sample(self, meter_name, sample_list): - uri = "%s/meters/%s" % (self.uri_prefix, meter_name) - body = self.serialize(sample_list) - resp, body = self.post(uri, body) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - def _helper_list(self, uri, query=None, period=None): - uri_dict = {} - if query: - uri_dict = {'q.field': query[0], - 'q.op': query[1], - 'q.value': query[2]} - if period: - uri_dict['period'] = period - if uri_dict: - uri += "?%s" % urllib.urlencode(uri_dict) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBodyList(resp, body) - - def list_resources(self, query=None): - uri = '%s/resources' % self.uri_prefix - return self._helper_list(uri, query) - - def list_meters(self, query=None): - uri = '%s/meters' % self.uri_prefix - return self._helper_list(uri, query) - - def list_statistics(self, meter, period=None, query=None): - uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter) - return self._helper_list(uri, query, period) - - def list_samples(self, meter_id, query=None): - uri = '%s/meters/%s' % (self.uri_prefix, meter_id) - return self._helper_list(uri, query) - - def list_events(self, query=None): - uri = '%s/events' % self.uri_prefix - return self._helper_list(uri, query) - - def show_resource(self, resource_id): - uri = '%s/resources/%s' % (self.uri_prefix, resource_id) - resp, body = self.get(uri) - self.expected_success(200, resp.status) - body = self.deserialize(body) - return rest_client.ResponseBody(resp, body) - - -class Manager(manager.Manager): - - load_clients = [ - 'servers_client', - 'compute_networks_client', - 'compute_floating_ips_client', - 'flavors_client', - 'image_client', - 'image_client_v2', - 'telemetry_client', - 'container_client', - 'object_client', - ] - - default_params = { - 'disable_ssl_certificate_validation': - CONF.identity.disable_ssl_certificate_validation, - 'ca_certs': CONF.identity.ca_certificates_file, - 'trace_requests': CONF.debug.trace_requests - } - - compute_params = { - 'service': CONF.compute.catalog_type, - 'region': CONF.compute.region or CONF.identity.region, - 'endpoint_type': CONF.compute.endpoint_type, - 'build_interval': CONF.compute.build_interval, - 'build_timeout': CONF.compute.build_timeout, - } - compute_params.update(default_params) - - image_params = { - 'catalog_type': 
CONF.image.catalog_type, - 'region': CONF.image.region or CONF.identity.region, - 'endpoint_type': CONF.image.endpoint_type, - 'build_interval': CONF.image.build_interval, - 'build_timeout': CONF.image.build_timeout, - } - image_params.update(default_params) - - telemetry_params = { - 'service': CONF.telemetry.catalog_type, - 'region': CONF.identity.region, - 'endpoint_type': CONF.telemetry.endpoint_type, - } - telemetry_params.update(default_params) - - object_storage_params = { - 'service': CONF.object_storage.catalog_type, - 'region': CONF.object_storage.region or CONF.identity.region, - 'endpoint_type': CONF.object_storage.endpoint_type - } - object_storage_params.update(default_params) - - def __init__(self, credentials=None, service=None): - super(Manager, self).__init__(credentials) - for client in self.load_clients: - getattr(self, 'set_%s' % client)() - - def set_servers_client(self): - self.servers_client = ServersClient(self.auth_provider, - **self.compute_params) - - def set_compute_networks_client(self): - self.compute_networks_client = NetworksClient(self.auth_provider, - **self.compute_params) - - def set_compute_floating_ips_client(self): - self.compute_floating_ips_client = FloatingIPsClient( - self.auth_provider, - **self.compute_params) - - def set_flavors_client(self): - self.flavors_client = FlavorsClient(self.auth_provider, - **self.compute_params) - - def set_image_client(self): - self.image_client = ImagesClient(self.auth_provider, - **self.image_params) - - def set_image_client_v2(self): - self.image_client_v2 = ImagesClientV2(self.auth_provider, - **self.image_params) - - def set_telemetry_client(self): - self.telemetry_client = TelemetryClient(self.auth_provider, - **self.telemetry_params) - - def set_container_client(self): - self.container_client = ContainerClient(self.auth_provider, - **self.object_storage_params) - - def set_object_client(self): - self.object_client = ObjectClient(self.auth_provider, - **self.object_storage_params) diff --git a/ceilometer/tests/unit/agent/__init__.py b/ceilometer/tests/unit/agent/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/agent/agentbase.py b/ceilometer/tests/unit/agent/agentbase.py deleted file mode 100644 index 666cf6ab..00000000 --- a/ceilometer/tests/unit/agent/agentbase.py +++ /dev/null @@ -1,738 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 Intel corp. -# Copyright 2013 eNovance -# Copyright 2014 Red Hat, Inc -# -# Authors: Yunhong Jiang -# Julien Danjou -# Eoghan Glynn -# Nejc Saje -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
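The Manager just above wires its clients dynamically: every name in load_clients is resolved to a set_<name>() method and invoked from __init__, so subclasses such as the ClientManager classes earlier in this patch pick their clients by overriding the list alone. A minimal sketch of the pattern (MiniManager and its stub client are illustrative):

    class MiniManager(object):
        load_clients = ['telemetry_client']

        def __init__(self):
            # Resolve each declared client to its setter and call it.
            for client in self.load_clients:
                getattr(self, 'set_%s' % client)()

        def set_telemetry_client(self):
            self.telemetry_client = object()  # stub for TelemetryClient


    mgr = MiniManager()
    assert mgr.telemetry_client is not None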
- -import abc -import copy -import datetime - -import mock -from oslo_config import fixture as fixture_config -from oslotest import mockpatch -import six -from stevedore import extension - -from ceilometer.agent import plugin_base -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer import sample -from ceilometer.tests import base -from ceilometer import utils - - -class TestSample(sample.Sample): - def __init__(self, name, type, unit, volume, user_id, project_id, - resource_id, timestamp=None, resource_metadata=None, - source=None): - super(TestSample, self).__init__(name, type, unit, volume, user_id, - project_id, resource_id, timestamp, - resource_metadata, source) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - return not self.__eq__(other) - - -default_test_data = TestSample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'Pollster'}, -) - - -class TestPollster(plugin_base.PollsterBase): - test_data = default_test_data - discovery = None - - @property - def default_discovery(self): - return self.discovery - - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - c = copy.deepcopy(self.test_data) - c.resource_metadata['resources'] = resources - return [c] - - -class BatchTestPollster(TestPollster): - test_data = default_test_data - discovery = None - - @property - def default_discovery(self): - return self.discovery - - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - for resource in resources: - c = copy.deepcopy(self.test_data) - c.timestamp = datetime.datetime.utcnow().isoformat() - c.resource_id = resource - c.resource_metadata['resource'] = resource - yield c - - -class TestPollsterException(TestPollster): - def get_samples(self, manager, cache, resources): - resources = resources or [] - self.samples.append((manager, resources)) - self.resources.extend(resources) - raise Exception() - - -class TestDiscovery(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - return self.resources - - -class TestDiscoveryException(plugin_base.DiscoveryBase): - def discover(self, manager, param=None): - self.params.append(param) - raise Exception() - - -@six.add_metaclass(abc.ABCMeta) -class BaseAgentManagerTestCase(base.BaseTestCase): - - class Pollster(TestPollster): - samples = [] - resources = [] - test_data = default_test_data - - class BatchPollster(BatchTestPollster): - samples = [] - resources = [] - test_data = default_test_data - - class PollsterAnother(TestPollster): - samples = [] - resources = [] - test_data = TestSample( - name='testanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterException(TestPollsterException): - samples = [] - resources = [] - test_data = 
TestSample( - name='testexception', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class PollsterExceptionAnother(TestPollsterException): - samples = [] - resources = [] - test_data = TestSample( - name='testexceptionanother', - type=default_test_data.type, - unit=default_test_data.unit, - volume=default_test_data.volume, - user_id=default_test_data.user_id, - project_id=default_test_data.project_id, - resource_id=default_test_data.resource_id, - timestamp=default_test_data.timestamp, - resource_metadata=default_test_data.resource_metadata) - - class Discovery(TestDiscovery): - params = [] - resources = [] - - class DiscoveryAnother(TestDiscovery): - params = [] - resources = [] - - @property - def group_id(self): - return 'another_group' - - class DiscoveryException(TestDiscoveryException): - params = [] - - def setup_polling(self): - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - - def create_extension_list(self): - return [extension.Extension('test', - None, - None, - self.Pollster(), ), - extension.Extension('testbatch', - None, - None, - self.BatchPollster(), ), - extension.Extension('testanother', - None, - None, - self.PollsterAnother(), ), - extension.Extension('testexception', - None, - None, - self.PollsterException(), ), - extension.Extension('testexceptionanother', - None, - None, - self.PollsterExceptionAnother(), )] - - def create_discovery_manager(self): - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension( - 'testdiscovery', - None, - None, - self.Discovery(), ), - extension.Extension( - 'testdiscoveryanother', - None, - None, - self.DiscoveryAnother(), ), - extension.Extension( - 'testdiscoveryexception', - None, - None, - self.DiscoveryException(), ), - ], - ) - - @abc.abstractmethod - def create_manager(self): - """Return subclass specific manager.""" - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def setUp(self): - super(BaseAgentManagerTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override( - 'pipeline_cfg_file', - self.path_get('etc/ceilometer/pipeline.yaml') - ) - self.CONF(args=[]) - self.mgr = self.create_manager() - self.mgr.extensions = self.create_extension_list() - self.mgr.partition_coordinator = mock.MagicMock() - fake_subset = lambda _, x: x - p_coord = self.mgr.partition_coordinator - p_coord.extract_my_subset.side_effect = fake_subset - self.mgr.tg = mock.MagicMock() - self.pipeline_cfg = { - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.setup_polling() - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - @staticmethod - def get_publisher(url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'rpc://': test_publisher.TestPublisher} - return fake_drivers[url](url) - - def tearDown(self): - self.Pollster.samples = [] - self.Pollster.discovery = [] - self.PollsterAnother.samples = [] - self.PollsterAnother.discovery = 
[] - self.PollsterException.samples = [] - self.PollsterException.discovery = [] - self.PollsterExceptionAnother.samples = [] - self.PollsterExceptionAnother.discovery = [] - self.Pollster.resources = [] - self.PollsterAnother.resources = [] - self.PollsterException.resources = [] - self.PollsterExceptionAnother.resources = [] - self.Discovery.params = [] - self.DiscoveryAnother.params = [] - self.DiscoveryException.params = [] - self.Discovery.resources = [] - self.DiscoveryAnother.resources = [] - super(BaseAgentManagerTestCase, self).tearDown() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = mock.MagicMock() - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.mgr.start() - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - self.assertEqual([timer_call], self.mgr.tg.add_timer.call_args_list) - self.mgr.stop() - self.mgr.partition_coordinator.stop.assert_called_once_with() - - @mock.patch('ceilometer.pipeline.setup_polling') - def test_start_with_pipeline_poller(self, setup_polling): - self.mgr.join_partitioning_groups = mock.MagicMock() - self.mgr.setup_polling_tasks = mock.MagicMock() - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 5) - self.mgr.start() - self.addCleanup(self.mgr.stop) - setup_polling.assert_called_once_with() - self.mgr.partition_coordinator.start.assert_called_once_with() - self.mgr.join_partitioning_groups.assert_called_once_with() - self.mgr.setup_polling_tasks.assert_called_once_with() - timer_call = mock.call(1.0, self.mgr.partition_coordinator.heartbeat) - pipeline_poller_call = mock.call(5, self.mgr.refresh_pipeline) - self.assertEqual([timer_call, pipeline_poller_call], - self.mgr.tg.add_timer.call_args_list) - - def test_join_partitioning_groups(self): - self.mgr.discovery_manager = self.create_discovery_manager() - self.mgr.join_partitioning_groups() - p_coord = self.mgr.partition_coordinator - static_group_ids = [utils.hash_of_set(p['resources']) - for p in self.pipeline_cfg['sources'] - if p['resources']] - expected = [mock.call(self.mgr.construct_group_id(g)) - for g in ['another_group', 'global'] + static_group_ids] - self.assertEqual(len(expected), len(p_coord.join_group.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.join_group.call_args_list) - - def test_setup_polling_tasks(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - per_task_resources = polling_tasks[60].resources - self.assertEqual(1, len(per_task_resources)) - self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), - set(per_task_resources['test_pipeline-test'].get({}))) - - def test_setup_polling_tasks_multiple_interval(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(2, len(polling_tasks)) - self.assertIn(60, 
polling_tasks.keys()) - self.assertIn(10, polling_tasks.keys()) - - def test_setup_polling_tasks_mismatch_counter(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 10, - 'meters': ['test_invalid'], - 'resources': ['invalid://'], - 'sinks': ['test_sink'] - }) - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.assertNotIn(10, polling_tasks.keys()) - - def test_setup_polling_task_same_interval(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['testanother'], - 'resources': ['testanother://'] if self.source_resources else [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - pollsters = polling_tasks.get(60).pollster_matches - self.assertEqual(2, len(pollsters)) - per_task_resources = polling_tasks[60].resources - self.assertEqual(2, len(per_task_resources)) - key = 'test_pipeline-test' - self.assertEqual(set(self.pipeline_cfg['sources'][0]['resources']), - set(per_task_resources[key].get({}))) - key = 'test_pipeline_1-testanother' - self.assertEqual(set(self.pipeline_cfg['sources'][1]['resources']), - set(per_task_resources[key].get({}))) - - def test_agent_manager_start(self): - mgr = self.create_manager() - mgr.extensions = self.mgr.extensions - mgr.create_polling_task = mock.MagicMock() - mgr.tg = mock.MagicMock() - mgr.start() - self.addCleanup(mgr.stop) - self.assertTrue(mgr.tg.add_timer.called) - - def test_manager_exception_persistency(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['testanother'], - 'sinks': ['test_sink'] - }) - self.setup_polling() - - def _verify_discovery_params(self, expected): - self.assertEqual(expected, self.Discovery.params) - self.assertEqual(expected, self.DiscoveryAnother.params) - self.assertEqual(expected, self.DiscoveryException.params) - - def _do_test_per_pollster_discovery(self, discovered_resources, - static_resources): - self.Pollster.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - if static_resources: - # just so we can test that the amalgamated static + per-pipeline - # resources override per-pollster discovery - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscoveryanother', - 'testdiscoverynonexistent', - 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - if static_resources: - self.assertEqual(set(static_resources + - self.DiscoveryAnother.resources), - set(self.Pollster.resources)) - else: - self.assertEqual(set(self.Discovery.resources), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pollster_discovery(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pollster_discovery_overridden_by_per_pipeline_discovery(self): - # ensure static+per_source_discovery overrides per_pollster_discovery - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def 
test_per_pollster_discovery_duplicated(self): - self._do_test_per_pollster_discovery(['dup', 'discovered_1', 'dup'], - []) - - def test_per_pollster_discovery_overridden_by_duplicated_static(self): - self._do_test_per_pollster_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'dup', 'dup']) - - def test_per_pollster_discovery_caching(self): - # ensure single discovery associated with multiple pollsters - # only called once per polling cycle - discovered_resources = ['discovered_1', 'discovered_2'] - self.Pollster.discovery = 'testdiscovery' - self.PollsterAnother.discovery = 'testdiscovery' - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.pipeline_cfg['sources'][0]['meters'].append('testanother') - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Discovery.params)) - self.assertEqual(discovered_resources, self.Pollster.resources) - self.assertEqual(discovered_resources, self.PollsterAnother.resources) - - def _do_test_per_pipeline_discovery(self, - discovered_resources, - static_resources): - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = discovered_resources - self.DiscoveryAnother.resources = [d[::-1] - for d in discovered_resources] - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - discovery = self.Discovery.resources + self.DiscoveryAnother.resources - # compare resource lists modulo ordering - self.assertEqual(set(static_resources + discovery), - set(self.Pollster.resources)) - - # Make sure no duplicated resource from discovery - for x in self.Pollster.resources: - self.assertEqual(1, self.Pollster.resources.count(x)) - - def test_per_pipeline_discovery_discovered_only(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - []) - - def test_per_pipeline_discovery_static_only(self): - self._do_test_per_pipeline_discovery([], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_augmented_by_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'discovered_2'], - ['static_1', 'static_2']) - - def test_per_pipeline_discovery_discovered_duplicated_static(self): - self._do_test_per_pipeline_discovery(['discovered_1', 'pud'], - ['dup', 'static_1', 'dup']) - - def test_multiple_pipelines_different_static_resources(self): - # assert that the individual lists of static and discovered resources - # for each pipeline with a common interval are passed to individual - # pollsters matching each pipeline - self.pipeline_cfg['sources'][0]['resources'] = ['test://'] - self.pipeline_cfg['sources'][0]['discovery'] = ['testdiscovery'] - self.pipeline_cfg['sources'].append({ - 'name': 'another_pipeline', - 'interval': 60, - 'meters': ['test'], - 'resources': ['another://'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_new'] - }) - self.mgr.discovery_manager = self.create_discovery_manager() - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - self.setup_polling() - polling_tasks = 
self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual([None], self.Discovery.params) - self.assertEqual([None], self.DiscoveryAnother.params) - self.assertEqual(2, len(self.Pollster.samples)) - samples = self.Pollster.samples - test_resources = ['test://', 'discovered_1', 'discovered_2'] - another_resources = ['another://', 'discovered_3', 'discovered_4'] - if samples[0][1] == test_resources: - self.assertEqual(another_resources, samples[1][1]) - elif samples[0][1] == another_resources: - self.assertEqual(test_resources, samples[1][1]) - else: - self.fail('unexpected sample resources %s' % samples) - - def test_multiple_sources_different_discoverers(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - self.DiscoveryAnother.resources = ['discovered_3', 'discovered_4'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1']}, - {'name': 'test_source_2', - 'interval': 60, - 'meters': ['testanother'], - 'discovery': ['testdiscoveryanother'], - 'sinks': ['test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - self.assertEqual(1, len(self.PollsterAnother.samples)) - self.assertEqual(['discovered_3', 'discovered_4'], - self.PollsterAnother.resources) - - def test_multiple_sinks_same_discoverer(self): - self.Discovery.resources = ['discovered_1', 'discovered_2'] - sources = [{'name': 'test_source_1', - 'interval': 60, - 'meters': ['test'], - 'discovery': ['testdiscovery'], - 'sinks': ['test_sink_1', 'test_sink_2']}] - sinks = [{'name': 'test_sink_1', - 'transformers': [], - 'publishers': ['test://']}, - {'name': 'test_sink_2', - 'transformers': [], - 'publishers': ['test://']}] - self.pipeline_cfg = {'sources': sources, 'sinks': sinks} - self.mgr.discovery_manager = self.create_discovery_manager() - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.assertEqual(1, len(polling_tasks)) - self.assertIn(60, polling_tasks.keys()) - self.mgr.interval_task(polling_tasks.get(60)) - self.assertEqual(1, len(self.Pollster.samples)) - self.assertEqual(['discovered_1', 'discovered_2'], - self.Pollster.resources) - - def test_discovery_partitioning(self): - self.mgr.discovery_manager = self.create_discovery_manager() - p_coord = self.mgr.partition_coordinator - self.pipeline_cfg['sources'][0]['discovery'] = [ - 'testdiscovery', 'testdiscoveryanother', - 'testdiscoverynonexistent', 'testdiscoveryexception'] - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - expected = [mock.call(self.mgr.construct_group_id(d.obj.group_id), - d.obj.resources) - for d in self.mgr.discovery_manager - if hasattr(d.obj, 'resources')] - self.assertEqual(len(expected), - 
len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) - - def test_static_resources_partitioning(self): - p_coord = self.mgr.partition_coordinator - static_resources = ['static_1', 'static_2'] - static_resources2 = ['static_3', 'static_4'] - self.pipeline_cfg['sources'][0]['resources'] = static_resources - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline2', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': static_resources2, - 'sinks': ['test_sink'] - }) - # have one pipeline without static resources defined - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline3', - 'interval': 60, - 'meters': ['test', 'test2'], - 'resources': [], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(polling_tasks.get(60)) - # Only two groups need to be created, one for each pipeline, - # even though counter test is used twice - expected = [mock.call(self.mgr.construct_group_id( - utils.hash_of_set(resources)), - resources) - for resources in [static_resources, - static_resources2]] - self.assertEqual(len(expected), - len(p_coord.extract_my_subset.call_args_list)) - for c in expected: - self.assertIn(c, p_coord.extract_my_subset.call_args_list) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_polling_and_notify_with_resources(self, LOG): - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Polling pollster %(poll)s in the context of %(src)s', - {'poll': 'test', 'src': 'test_pipeline'}) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_skip_polling_and_notify_with_no_resources(self, LOG): - self.pipeline_cfg['sources'][0]['resources'] = [] - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - pollster = list(polling_task.pollster_matches['test_pipeline'])[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Skip pollster %(name)s, no %(p_context)sresources found this ' - 'cycle', {'name': pollster.name, 'p_context': ''}) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_skip_polling_polled_resources(self, LOG): - self.pipeline_cfg['sources'].append({ - 'name': 'test_pipeline_1', - 'interval': 60, - 'meters': ['test'], - 'resources': ['test://'], - 'sinks': ['test_sink'] - }) - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task.poll_and_notify() - LOG.info.assert_called_with( - 'Skip pollster %(name)s, no %(p_context)sresources found this ' - 'cycle', {'name': 'test', 'p_context': 'new '}) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_polling_samples_timestamp(self, mock_utc): - polled_samples = [] - timestamp = '2222-11-22T00:11:22.333333' - - def fake_send_notification(samples): - polled_samples.extend(samples) - - mock_utc.return_value = datetime.datetime.strptime( - timestamp, "%Y-%m-%dT%H:%M:%S.%f") - - self.setup_polling() - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - polling_task._send_notification = mock.Mock( - side_effect=fake_send_notification) - polling_task.poll_and_notify() - self.assertEqual(timestamp, polled_samples[0]['timestamp']) diff --git a/ceilometer/tests/unit/agent/test_discovery.py b/ceilometer/tests/unit/agent/test_discovery.py deleted file mode 100644 index bf68c26b..00000000 --- a/ceilometer/tests/unit/agent/test_discovery.py +++ 
/dev/null @@ -1,108 +0,0 @@ -# -# Copyright 2014 Red Hat Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer agent discovery. -""" - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent.discovery import endpoint -from ceilometer.agent.discovery import localnode -from ceilometer.hardware import discovery as hardware - - -class TestEndpointDiscovery(base.BaseTestCase): - - def setUp(self): - super(TestEndpointDiscovery, self).setUp() - self.discovery = endpoint.EndpointDiscovery() - self.manager = mock.MagicMock() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('interface', 'test-endpoint-type', - group='service_credentials') - self.CONF.set_override('region_name', 'test-region-name', - group='service_credentials') - self.catalog = (self.manager.keystone.session.auth.get_access. - return_value.service_catalog) - - def test_keystone_called(self): - self.discovery.discover(self.manager, param='test-service-type') - expected = [mock.call(service_type='test-service-type', - interface='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, self.catalog.get_urls.call_args_list) - - def test_keystone_called_no_service_type(self): - self.discovery.discover(self.manager) - expected = [mock.call(service_type=None, - interface='test-endpoint-type', - region_name='test-region-name')] - self.assertEqual(expected, - self.catalog.get_urls - .call_args_list) - - def test_keystone_called_no_endpoints(self): - self.catalog.get_urls.return_value = [] - self.assertEqual([], self.discovery.discover(self.manager)) - - -class TestLocalnodeDiscovery(base.BaseTestCase): - def setUp(self): - super(TestLocalnodeDiscovery, self).setUp() - self.discovery = localnode.LocalNodeDiscovery() - self.manager = mock.MagicMock() - - def test_localnode_discovery(self): - self.assertEqual(['local_host'], self.discovery.discover(self.manager)) - - -class TestHardwareDiscovery(base.BaseTestCase): - class MockInstance(object): - addresses = {'ctlplane': [ - {'addr': '0.0.0.0', - 'OS-EXT-IPS-MAC:mac_addr': '01-23-45-67-89-ab'} - ]} - id = 'resource_id' - image = {'id': 'image_id'} - flavor = {'id': 'flavor_id'} - - expected = { - 'resource_id': 'resource_id', - 'resource_url': 'snmp://ro_snmp_user:password@0.0.0.0', - 'mac_addr': '01-23-45-67-89-ab', - 'image_id': 'image_id', - 'flavor_id': 'flavor_id', - } - - def setUp(self): - super(TestHardwareDiscovery, self).setUp() - self.discovery = hardware.NodesDiscoveryTripleO() - self.discovery.nova_cli = mock.MagicMock() - self.manager = mock.MagicMock() - - def test_hardware_discovery(self): - self.discovery.nova_cli.instance_get_all.return_value = [ - self.MockInstance()] - resources = self.discovery.discover(self.manager) - self.assertEqual(1, len(resources)) - self.assertEqual(self.expected, resources[0]) - - def test_hardware_discovery_without_flavor(self): - instance = self.MockInstance() - instance.flavor = {} - 
self.discovery.nova_cli.instance_get_all.return_value = [instance] - resources = self.discovery.discover(self.manager) - self.assertEqual(0, len(resources)) diff --git a/ceilometer/tests/unit/agent/test_manager.py b/ceilometer/tests/unit/agent/test_manager.py deleted file mode 100644 index 4a33f55c..00000000 --- a/ceilometer/tests/unit/agent/test_manager.py +++ /dev/null @@ -1,499 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer agent manager""" - -import shutil - -from keystoneclient import exceptions as ks_exceptions -import mock -from novaclient import client as novaclient -from oslo_config import fixture as fixture_config -from oslo_service import service as os_service -from oslo_utils import fileutils -from oslotest import base -from oslotest import mockpatch -import requests -import six -from stevedore import extension -import yaml - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.hardware import discovery -from ceilometer import pipeline -from ceilometer.tests.unit.agent import agentbase - - -class PollingException(Exception): - pass - - -class TestPollsterBuilder(agentbase.TestPollster): - @classmethod - def build_pollsters(cls): - return [('builder1', cls()), ('builder2', cls())] - - -@mock.patch('ceilometer.compute.pollsters.' - 'BaseComputePollster.setup_environment', - mock.Mock(return_value=None)) -class TestManager(base.BaseTestCase): - def setUp(self): - super(TestManager, self).setUp() - self.conf = self.useFixture(fixture_config.Config()).conf - self.conf(args=[]) - - @mock.patch('ceilometer.pipeline.setup_polling', mock.MagicMock()) - def test_load_plugins(self): - mgr = manager.AgentManager() - self.assertIsNotNone(list(mgr.extensions)) - - def test_load_plugins_pollster_list(self): - mgr = manager.AgentManager(pollster_list=['disk.*']) - # currently we do have 26 disk-related pollsters - self.assertEqual(26, len(list(mgr.extensions))) - - def test_load_invalid_plugins_pollster_list(self): - # if no valid pollsters have been loaded, the ceilometer - # polling program should exit - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - pollster_list=['aaa']) - - def test_load_plugins_no_intersection(self): - # Let's test nothing will be polled if namespace and pollsters - # list have no intersection. - parameters = dict(namespaces=['compute'], - pollster_list=['storage.*']) - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - parameters) - - # Test plugin load behavior based on Node Manager pollsters. - # pollster_list is just a filter, so sensor pollsters under 'ipmi' - # namespace would be also instanced. Still need mock __init__ for it. 
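Before those Node Manager cases (which continue directly below), it is worth noting how the pollster-list filter itself behaves: the deleted TestManager cases treat --pollster-list as a glob filter over the names of discovered extensions, and an empty result is fatal. A minimal sketch of that matching idea, assuming fnmatch-style semantics; match_pollster and the sample names here are illustrative, not the actual AgentManager API:

    import fnmatch

    def match_pollster(name, patterns):
        # A pollster survives filtering when any glob pattern matches its name.
        return any(fnmatch.fnmatch(name, p) for p in patterns)

    discovered = ['disk.read.bytes', 'cpu', 'memory.usage']
    kept = [n for n in discovered if match_pollster(n, ['disk.*'])]
    assert kept == ['disk.read.bytes']

    # An empty intersection corresponds to the EmptyPollstersList error the
    # tests assert on: with nothing valid to poll, the agent refuses to start.
    assert not [n for n in discovered if match_pollster(n, ['aaa'])]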
- @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_normal_plugins(self): - mgr = manager.AgentManager(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - # 8 pollsters for Node Manager - self.assertEqual(8, len(mgr.extensions)) - - # Skip loading pollster upon ExtensionLoadError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=plugin_base.ExtensionLoadError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - @mock.patch('ceilometer.agent.manager.LOG') - def test_load_failed_plugins(self, LOG): - # Here we additionally check that namespaces will be converted to the - # list if param was not set as a list. - try: - manager.AgentManager(namespaces='ipmi', - pollster_list=['hardware.ipmi.node.*']) - except manager.EmptyPollstersList: - err_msg = 'Skip loading extension for hardware.ipmi.node.%s' - pollster_names = [ - 'power', 'temperature', 'outlet_temperature', - 'airflow', 'cups', 'cpu_util', 'mem_util', 'io_util'] - calls = [mock.call(err_msg % n) for n in pollster_names] - LOG.exception.assert_has_calls(calls=calls, any_order=True) - - # Skip loading pollster upon ImportError - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=ImportError)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_import_error_in_plugin(self): - parameters = dict(namespaces=['ipmi'], - pollster_list=['hardware.ipmi.node.*']) - self.assertRaisesRegexp( - manager.EmptyPollstersList, - 'No valid pollsters can be loaded with the startup parameters' - ' polling-namespaces and pollster-list.', - manager.AgentManager, - parameters) - - # Exceptions other than ExtensionLoadError are propagated - @mock.patch('ceilometer.ipmi.pollsters.node._Base.__init__', - mock.Mock(side_effect=PollingException)) - @mock.patch('ceilometer.ipmi.pollsters.sensor.SensorPollster.__init__', - mock.Mock(return_value=None)) - def test_load_exceptional_plugins(self): - self.assertRaises(PollingException, - manager.AgentManager, - ['ipmi'], - ['hardware.ipmi.node.*']) - - def test_load_plugins_pollster_list_forbidden(self): - manager.cfg.CONF.set_override('backend_url', 'http://', - group='coordination') - self.assertRaises(manager.PollsterListForbidden, - manager.AgentManager, - pollster_list=['disk.*']) - manager.cfg.CONF.reset() - - def test_builder(self): - @staticmethod - def fake_get_ext_mgr(namespace): - if 'builder' in namespace: - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('builder', - None, - TestPollsterBuilder, - None), - ] - ) - else: - return extension.ExtensionManager.make_test_instance( - [ - extension.Extension('test', - None, - None, - agentbase.TestPollster()), - ] - ) - - with mock.patch.object(manager.AgentManager, '_get_ext_mgr', - new=fake_get_ext_mgr): - mgr = manager.AgentManager(namespaces=['central']) - self.assertEqual(3, len(mgr.extensions)) - for ext in mgr.extensions: - self.assertIn(ext.name, ['builder1', 'builder2', 'test']) - self.assertIsInstance(ext.obj, agentbase.TestPollster) - - -class TestPollsterKeystone(agentbase.TestPollster): - def get_samples(self, manager, cache, resources): - # Just try to use keystone, that will raise an exception - manager.keystone.projects.list() - - -class 
TestPollsterPollingException(agentbase.TestPollster): - polling_failures = 0 - - def get_samples(self, manager, cache, resources): - func = super(TestPollsterPollingException, self).get_samples - sample = func(manager=manager, - cache=cache, - resources=resources) - - # Raise polling exception after 2 times - self.polling_failures += 1 - if self.polling_failures > 2: - raise plugin_base.PollsterPermanentError(resources) - - return sample - - -class TestRunTasks(agentbase.BaseAgentManagerTestCase): - - class PollsterKeystone(TestPollsterKeystone): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testkeystone', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - class PollsterPollingException(TestPollsterPollingException): - samples = [] - resources = [] - test_data = agentbase.TestSample( - name='testpollingexception', - type=agentbase.default_test_data.type, - unit=agentbase.default_test_data.unit, - volume=agentbase.default_test_data.volume, - user_id=agentbase.default_test_data.user_id, - project_id=agentbase.default_test_data.project_id, - resource_id=agentbase.default_test_data.resource_id, - timestamp=agentbase.default_test_data.timestamp, - resource_metadata=agentbase.default_test_data.resource_metadata) - - @staticmethod - @mock.patch('ceilometer.compute.pollsters.' - 'BaseComputePollster.setup_environment', - mock.Mock(return_value=None)) - def create_manager(): - return manager.AgentManager() - - @staticmethod - def setup_pipeline_file(pipeline): - if six.PY3: - pipeline = pipeline.encode('utf-8') - - pipeline_cfg_file = fileutils.write_to_tempfile(content=pipeline, - prefix="pipeline", - suffix="yaml") - return pipeline_cfg_file - - def fake_notifier_sample(self, ctxt, event_type, payload): - for m in payload['samples']: - del m['message_signature'] - self.notified_samples.append(m) - - def setUp(self): - self.notified_samples = [] - self.notifier = mock.Mock() - self.notifier.sample.side_effect = self.fake_notifier_sample - self.useFixture(mockpatch.Patch('oslo_messaging.Notifier', - return_value=self.notifier)) - self.source_resources = True - super(TestRunTasks, self).setUp() - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - return_value=mock.Mock())) - - def tearDown(self): - self.PollsterKeystone.samples = [] - self.PollsterKeystone.resources = [] - self.PollsterPollingException.samples = [] - self.PollsterPollingException.resources = [] - super(TestRunTasks, self).tearDown() - - def create_extension_list(self): - exts = super(TestRunTasks, self).create_extension_list() - exts.extend([extension.Extension('testkeystone', - None, - None, - self.PollsterKeystone(), ), - extension.Extension('testpollingexception', - None, - None, - self.PollsterPollingException(), )]) - return exts - - def test_get_sample_resources(self): - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertTrue(self.Pollster.resources) - - def test_when_keystone_fail(self): - """Test for bug 1316532.""" - self.useFixture(mockpatch.Patch( - 'keystoneclient.v2_0.client.Client', - side_effect=ks_exceptions.ClientException)) - self.pipeline_cfg = { - 
'sources': [{ - 'name': "test_keystone", - 'interval': 10, - 'meters': ['testkeystone'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertFalse(self.PollsterKeystone.samples) - self.assertFalse(self.notified_samples) - - @mock.patch('ceilometer.agent.manager.LOG') - @mock.patch('ceilometer.nova_client.LOG') - def test_hardware_discover_fail_minimize_logs(self, novalog, baselog): - self.useFixture(mockpatch.PatchObject( - novaclient.HTTPClient, - 'authenticate', - side_effect=requests.ConnectionError)) - - class PollsterHardware(agentbase.TestPollster): - discovery = 'tripleo_overcloud_nodes' - - class PollsterHardwareAnother(agentbase.TestPollster): - discovery = 'tripleo_overcloud_nodes' - - self.mgr.extensions.extend([ - extension.Extension('testhardware', - None, - None, - PollsterHardware(), ), - extension.Extension('testhardware2', - None, - None, - PollsterHardwareAnother(), ) - ]) - ext = extension.Extension('tripleo_overcloud_nodes', - None, - None, - discovery.NodesDiscoveryTripleO()) - self.mgr.discovery_manager = (extension.ExtensionManager - .make_test_instance([ext])) - - self.pipeline_cfg = { - 'sources': [{ - 'name': "test_hardware", - 'interval': 10, - 'meters': ['testhardware', 'testhardware2'], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_tasks = self.mgr.setup_polling_tasks() - self.mgr.interval_task(list(polling_tasks.values())[0]) - self.assertEqual(1, novalog.exception.call_count) - self.assertFalse(baselog.exception.called) - - @mock.patch('ceilometer.agent.manager.LOG') - def test_polling_exception(self, LOG): - source_name = 'test_pollingexception' - self.pipeline_cfg = { - 'sources': [{ - 'name': source_name, - 'interval': 10, - 'meters': ['testpollingexception'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - } - self.mgr.polling_manager = pipeline.PollingManager(self.pipeline_cfg) - polling_task = list(self.mgr.setup_polling_tasks().values())[0] - pollster = list(polling_task.pollster_matches[source_name])[0] - - # 2 samples after 4 pollings, as pollster got disabled upon exception - for x in range(0, 4): - self.mgr.interval_task(polling_task) - samples = self.notified_samples - self.assertEqual(2, len(samples)) - LOG.error.assert_called_once_with(( - 'Prevent pollster %(name)s for ' - 'polling source %(source)s anymore!') - % ({'name': pollster.name, 'source': source_name})) - - def test_batching_polled_samples_false(self): - self.CONF.set_override('batch_polled_samples', False) - self._batching_samples(4, 4) - - def test_batching_polled_samples_true(self): - self.CONF.set_override('batch_polled_samples', True) - self._batching_samples(4, 1) - - def test_batching_polled_samples_default(self): - self._batching_samples(4, 1) - - def _batching_samples(self, expected_samples, call_count): - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['testbatch'], - 'resources': ['alpha', 'beta', 'gamma', 'delta'], - 'sinks': 
['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - - self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) - self.mgr.start() - self.addCleanup(self.mgr.stop) - # Manually executes callbacks - for timer in self.mgr.pollster_timers: - timer.f(*timer.args, **timer.kw) - - samples = self.notified_samples - self.assertEqual(expected_samples, len(samples)) - self.assertEqual(call_count, self.notifier.sample.call_count) - - def test_start_with_reloadable_pipeline(self): - - self.CONF.set_override('heartbeat', 1.0, group='coordination') - self.CONF.set_override('refresh_pipeline_cfg', True) - self.CONF.set_override('pipeline_polling_interval', 2) - - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['test'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - self.CONF.set_override("pipeline_cfg_file", pipeline_cfg_file) - self.mgr.tg = os_service.threadgroup.ThreadGroup(1000) - self.mgr.start() - self.addCleanup(self.mgr.stop) - - # we only got the old name of meters - for sample in self.notified_samples: - self.assertEqual('test', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) - - # Modify the collection targets - pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_pipeline', - 'interval': 1, - 'meters': ['testanother'], - 'resources': ['test://'] if self.source_resources else [], - 'sinks': ['test_sink']}], - 'sinks': [{ - 'name': 'test_sink', - 'transformers': [], - 'publishers': ["test"]}] - }) - - updated_pipeline_cfg_file = self.setup_pipeline_file(pipeline) - - # Move/rename the updated pipeline file to the original pipeline - # file path as recorded in oslo config - shutil.move(updated_pipeline_cfg_file, pipeline_cfg_file) - - # Flush notified samples to test only new, nothing latent on - # fake message bus. - self.notified_samples = [] - - # we only got the new name of meters - for sample in self.notified_samples: - self.assertEqual('testanother', sample['counter_name']) - self.assertEqual(1, sample['counter_volume']) - self.assertEqual('test_run_tasks', sample['resource_id']) diff --git a/ceilometer/tests/unit/agent/test_plugin.py b/ceilometer/tests/unit/agent/test_plugin.py deleted file mode 100644 index e3a30b34..00000000 --- a/ceilometer/tests/unit/agent/test_plugin.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
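The NotificationBase test deleted just below feeds a v2-style oslo.messaging message (separate ctxt, payload, and metadata sections) to plugin.info() and asserts that a flat, legacy-format notification comes out. A condensed sketch of the conversion those assertions imply; the function name is illustrative rather than the exact plugin_base helper:

    def to_old_notification_format(priority, message):
        # Flatten a v2 message: hoist metadata (message_id, timestamp) to the
        # top level and prefix request-context keys with '_context_'.
        notification = {'priority': priority,
                        'event_type': message['event_type'],
                        'publisher_id': message['publisher_id'],
                        'payload': message['payload']}
        notification.update(message['metadata'])
        for key, value in message['ctxt'].items():
            notification['_context_' + key] = value
        return notification

Running this over the test's sample message reproduces the expected dict, including _context_user_id, _context_project_id, and the hoisted message_id and timestamp.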
- -import mock -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.agent import plugin_base - - -class NotificationBaseTestCase(base.BaseTestCase): - def setUp(self): - super(NotificationBaseTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - class FakePlugin(plugin_base.NotificationBase): - event_types = ['compute.*'] - - def process_notification(self, message): - pass - - def get_targets(self, conf): - pass - - def test_plugin_info(self): - plugin = self.FakePlugin(mock.Mock()) - plugin.to_samples_and_publish = mock.Mock() - message = { - 'ctxt': {'user_id': 'fake_user_id', - 'project_id': 'fake_project_id'}, - 'publisher_id': 'fake.publisher_id', - 'event_type': 'fake.event', - 'payload': {'foo': 'bar'}, - 'metadata': {'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8', - 'timestamp': '2015-06-1909:19:35.786893'} - } - plugin.info([message]) - notification = { - 'priority': 'info', - 'event_type': 'fake.event', - 'timestamp': '2015-06-1909:19:35.786893', - '_context_user_id': 'fake_user_id', - '_context_project_id': 'fake_project_id', - 'publisher_id': 'fake.publisher_id', - 'payload': {'foo': 'bar'}, - 'message_id': '3577a84f-29ec-4904-9566-12c52289c2e8' - } - plugin.to_samples_and_publish.assert_called_with(notification) diff --git a/ceilometer/tests/unit/api/test_hooks.py b/ceilometer/tests/unit/api/test_hooks.py deleted file mode 100644 index 96bc023b..00000000 --- a/ceilometer/tests/unit/api/test_hooks.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015 Huawei Technologies Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture as fixture_config -import oslo_messaging - -from ceilometer.api import hooks -from ceilometer.tests import base - - -class TestTestNotifierHook(base.BaseTestCase): - - def setUp(self): - super(TestTestNotifierHook, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_init_notifier_with_drivers(self): - self.CONF.set_override('telemetry_driver', 'messagingv2', - group='publisher_notifier') - hook = hooks.NotifierHook() - notifier = hook.notifier - self.assertIsInstance(notifier, oslo_messaging.Notifier) - self.assertEqual(['messagingv2'], notifier._driver_names) diff --git a/ceilometer/tests/unit/api/v2/test_complex_query.py b/ceilometer/tests/unit/api/v2/test_complex_query.py deleted file mode 100644 index 363e2112..00000000 --- a/ceilometer/tests/unit/api/v2/test_complex_query.py +++ /dev/null @@ -1,363 +0,0 @@ -# -# Copyright Ericsson AB 2013. All rights reserved -# -# Authors: Ildiko Vancsa -# Balazs Gibizer -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test the methods related to complex query.""" -import datetime - -import fixtures -import jsonschema -import mock -from oslotest import base -import wsme - -from ceilometer.api.controllers.v2 import query -from ceilometer.storage import models - - -class FakeComplexQuery(query.ValidatedComplexQuery): - def __init__(self, db_model, additional_name_mapping=None, metadata=False): - super(FakeComplexQuery, self).__init__(query=None, - db_model=db_model, - additional_name_mapping=( - additional_name_mapping or - {}), - metadata_allowed=metadata) - - -sample_name_mapping = {"resource": "resource_id", - "meter": "counter_name", - "type": "counter_type", - "unit": "counter_unit", - "volume": "counter_volume"} - - -class TestComplexQuery(base.BaseTestCase): - def setUp(self): - super(TestComplexQuery, self).setUp() - self.useFixture(fixtures.MonkeyPatch( - 'pecan.response', mock.MagicMock())) - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - - def test_replace_isotime_utc(self): - filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_timezone_removed(self): - filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), - filter_expr["="]["timestamp"]) - - def test_replace_isotime_wrong_syntax(self): - filter_expr = {"=": {"timestamp": "not a valid isotime string"}} - self.assertRaises(wsme.exc.ClientSideError, - self.query._replace_isotime_with_datetime, - filter_expr) - - def test_replace_isotime_in_complex_filter(self): - filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["and"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["and"][1]["="]["timestamp"]) - - def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): - subfilter = {"and": [{"=": {"project_id": 42}}, - {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} - - filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, - subfilter]} - - self.query._replace_isotime_with_datetime(filter_expr) - self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), - filter_expr["or"][0]["="]["timestamp"]) - self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), - filter_expr["or"][1]["and"][1]["="]["timestamp"]) - - def test_convert_operator_to_lower_case(self): - filter_expr = {"AND": [{"=": {"project_id": 42}}, - {"=": {"project_id": 44}}]} - self.query._convert_operator_to_lower_case(filter_expr) - self.assertEqual("and", list(filter_expr.keys())[0]) - - filter_expr = {"Or": [{"=": {"project_id": 43}}, - {"anD": [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.query._convert_operator_to_lower_case(filter_expr) - 
self.assertEqual("or", list(filter_expr.keys())[0]) - self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) - - def test_invalid_filter_misstyped_field_name_samples(self): - filter = {"=": {"project_id11": 42}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_complex_filter_wrong_field_names(self): - filter = {"and": - [{"=": {"non_existing_field": 42}}, - {"=": {"project_id": 42}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"or": - [{"=": {"non_existing_field": 42}}, - {"and": - [{"=": {"project_id": 44}}, - {"=": {"project_id": 42}}]}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_convert_orderby(self): - orderby = [] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([], orderby) - - orderby = [{"project_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "desc"}], orderby) - - orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] - self.query._convert_orderby_to_lower_case(orderby) - self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], - orderby) - - def test_validate_orderby_empty_direction(self): - orderby = [{"project_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"resource_id": ""}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_order_string(self): - orderby = [{"project_id": "not a valid order"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_multiple_item_order_string(self): - orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_empty_field_name(self): - orderby = [{"": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - orderby = [{"project_id": "asc"}, {"": "desc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name(self): - orderby = [{"project_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): - orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - def test_validate_orderby_metadata_is_not_allowed(self): - orderby = [{"metadata.display_name": "asc"}] - self.assertRaises(jsonschema.ValidationError, - self.query._validate_orderby, - orderby) - - -class TestFilterSyntaxValidation(base.BaseTestCase): - def setUp(self): - super(TestFilterSyntaxValidation, self).setUp() - self.query = FakeComplexQuery(models.Sample, - sample_name_mapping, - True) - - def test_simple_operator(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=>": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - def test_valid_value_types(self): - filter = {"=": {"project_id": "string_value"}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": 42}} - self.query._validate_filter(filter) - - filter = 
{"=": {"project_id": 3.14}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": True}} - self.query._validate_filter(filter) - - filter = {"=": {"project_id": False}} - self.query._validate_filter(filter) - - def test_invalid_simple_operator(self): - filter = {"==": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"": {"project_id": "string_value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_operator_is_invalid(self): - filter = {"=": {"project_id": "string_value"}, - "<": {"": ""}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_expression_is_invalid(self): - filter = {} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_invalid_field_name(self): - filter = {"=": {"": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"=": {" ": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - filter = {"=": {"\t": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_field_is_invalid(self): - filter = {"=": {"project_id": "value", "resource_id": "value"}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_missing_field_after_simple_op_is_invalid(self): - filter = {"=": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - filter = {"or": [{"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]} - self.query._validate_filter(filter) - - def test_complex_operator_with_in(self): - filter = {"and": [{"<": {"counter_volume": 42}}, - {">=": {"counter_volume": 36}}, - {"in": {"project_id": ["project_id1", - "project_id2", - "project_id3"]}}]} - self.query._validate_filter(filter) - - def test_invalid_complex_operator(self): - filter = {"xor": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_and_or_with_one_child_is_invalid(self): - filter = {"or": [{"=": {"project_id": "string_value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_complex_operator_with_zero_child_is_invalid(self): - filter = {"or": []} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_more_than_one_complex_operator_is_invalid(self): - filter = {"and": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}], - "or": [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}]} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not(self): - filter = {"not": {"=": {"project_id": "value"}}} - self.query._validate_filter(filter) - - filter = { 
- "not": - {"or": - [{"and": - [{"=": {"project_id": "string_value"}}, - {"=": {"resource_id": "value"}}, - {"<": {"counter_name": 42}}]}, - {"=": {"counter_name": "value"}}]}} - self.query._validate_filter(filter) - - def test_not_with_zero_child_is_invalid(self): - filter = {"not": {}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_not_with_more_than_one_child_is_invalid(self): - filter = {"not": {"=": {"project_id": "value"}, - "!=": {"resource_id": "value"}}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) - - def test_empty_in_query_not_passing(self): - filter = {"in": {"resource_id": []}} - self.assertRaises(jsonschema.ValidationError, - self.query._validate_filter, - filter) diff --git a/ceilometer/tests/unit/api/v2/test_query.py b/ceilometer/tests/unit/api/v2/test_query.py index 5ed3d12c..0e57aaab 100644 --- a/ceilometer/tests/unit/api/v2/test_query.py +++ b/ceilometer/tests/unit/api/v2/test_query.py @@ -18,18 +18,12 @@ import datetime import fixtures import mock -from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import wsme from ceilometer.api.controllers.v2 import base as v2_base from ceilometer.api.controllers.v2 import events -from ceilometer.api.controllers.v2 import meters -from ceilometer.api.controllers.v2 import utils -from ceilometer import storage -from ceilometer.storage import base as storage_base -from ceilometer.tests import base as tests_base class TestQuery(base.BaseTestCase): @@ -166,237 +160,3 @@ class TestQuery(base.BaseTestCase): type='string') self.assertRaises(v2_base.ClientSideError, events._event_query_to_event_filter, [query]) - - -class TestValidateGroupByFields(base.BaseTestCase): - - def test_valid_field(self): - result = meters._validate_groupby_fields(['user_id']) - self.assertEqual(['user_id'], result) - - def test_valid_fields_multiple(self): - result = set(meters._validate_groupby_fields( - ['user_id', 'project_id', 'source'])) - self.assertEqual(set(['user_id', 'project_id', 'source']), result) - - def test_invalid_field(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['wtf']) - - def test_invalid_field_multiple(self): - self.assertRaises(wsme.exc.UnknownArgument, - meters._validate_groupby_fields, - ['user_id', 'wtf', 'project_id', 'source']) - - def test_duplicate_fields(self): - result = set( - meters._validate_groupby_fields(['user_id', 'source', 'user_id']) - ) - self.assertEqual(set(['user_id', 'source']), result) - - -class TestQueryToKwArgs(tests_base.BaseTestCase): - def setUp(self): - super(TestQueryToKwArgs, self).setUp() - self.useFixture(mockpatch.PatchObject( - utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) - self.useFixture(mockpatch.PatchObject( - utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) - - def test_sample_filter_single(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertIn('user', kwargs) - self.assertEqual(1, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - - def test_sample_filter_multi(self): - q = [v2_base.Query(field='user_id', - op='eq', - value='uid'), - v2_base.Query(field='project_id', - op='eq', - value='pid'), - v2_base.Query(field='resource_id', - op='eq', - value='rid'), - v2_base.Query(field='source', - op='eq', - value='source_name'), - v2_base.Query(field='meter', - op='eq', - 
value='meter_name')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(5, len(kwargs)) - self.assertEqual('uid', kwargs['user']) - self.assertEqual('pid', kwargs['project']) - self.assertEqual('rid', kwargs['resource']) - self.assertEqual('source_name', kwargs['source']) - self.assertEqual('meter_name', kwargs['meter']) - - def test_sample_filter_timestamp(self): - ts_start = timeutils.utcnow() - ts_end = ts_start + datetime.timedelta(minutes=5) - q = [v2_base.Query(field='timestamp', - op='lt', - value=str(ts_end)), - v2_base.Query(field='timestamp', - op='gt', - value=str(ts_start))] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(4, len(kwargs)) - self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) - self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) - self.assertEqual('gt', kwargs['start_timestamp_op']) - self.assertEqual('lt', kwargs['end_timestamp_op']) - - def test_sample_filter_meta(self): - q = [v2_base.Query(field='metadata.size', - op='eq', - value='20'), - v2_base.Query(field='resource_metadata.id', - op='eq', - value='meta_id')] - kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) - self.assertEqual(1, len(kwargs)) - self.assertEqual(2, len(kwargs['metaquery'])) - self.assertEqual(20, kwargs['metaquery']['metadata.size']) - self.assertEqual('meta_id', kwargs['metaquery']['metadata.id']) - - def test_sample_filter_non_equality_on_metadata(self): - queries = [v2_base.Query(field='resource_metadata.image_id', - op='gt', - value='image', - type='string'), - v2_base.Query(field='metadata.ramdisk_id', - op='le', - value='ramdisk', - type='string')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__) - - def test_sample_filter_invalid_field(self): - q = [v2_base.Query(field='invalid', - op='eq', - value='20')] - self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_invalid_op(self): - q = [v2_base.Query(field='user_id', - op='lt', - value='20')] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_timestamp_invalid_op(self): - ts_start = timeutils.utcnow() - q = [v2_base.Query(field='timestamp', - op='eq', - value=str(ts_start))] - self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - - def test_sample_filter_exclude_internal(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake', - type='string') - for f in ['y', 'on_behalf_of', 'x']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - self.assertRaises(wsme.exc.ClientSideError, - utils.query_to_kwargs, - queries, - storage.SampleFilter.__init__, - internal_keys=['on_behalf_of']) - - def test_sample_filter_self_always_excluded(self): - queries = [v2_base.Query(field='user_id', - op='eq', - value='20')] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - self.assertNotIn('self', kwargs) - - def test_sample_filter_translation(self): - queries = [v2_base.Query(field=f, - op='eq', - value='fake_%s' % f, - type='string') for f in ['user_id', - 'project_id', - 
'resource_id']] - with mock.patch('pecan.request') as request: - request.headers.return_value = {'X-ProjectId': 'foobar'} - kwargs = utils.query_to_kwargs(queries, - storage.SampleFilter.__init__) - for o in ['user', 'project', 'resource']: - self.assertEqual('fake_%s_id' % o, kwargs.get(o)) - - def test_timestamp_validation(self): - q = [v2_base.Query(field='timestamp', - op='le', - value='123')] - - exc = self.assertRaises( - wsme.exc.InvalidInput, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - expected_exc = wsme.exc.InvalidInput('timestamp', '123', - 'invalid timestamp format') - self.assertEqual(str(expected_exc), str(exc)) - - def test_sample_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, q, storage.SampleFilter.__init__) - valid_keys = ['message_id', 'meter', 'project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_meters_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, - q, storage_base.Connection.get_meters, ['limit', 'unique']) - valid_keys = ['project', 'resource', 'source', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) - - def test_get_resources_filter_valid_fields(self): - q = [v2_base.Query(field='abc', - op='eq', - value='abc')] - exc = self.assertRaises( - wsme.exc.UnknownArgument, - utils.query_to_kwargs, - q, storage_base.Connection.get_resources, ['limit']) - valid_keys = ['project', 'resource', - 'search_offset', 'source', 'timestamp', 'user'] - msg = ("unrecognized field in query: %s, " - "valid keys: %s") % (q, valid_keys) - expected_exc = wsme.exc.UnknownArgument('abc', msg) - self.assertEqual(str(expected_exc), str(exc)) diff --git a/ceilometer/tests/unit/api/v2/test_statistics.py b/ceilometer/tests/unit/api/v2/test_statistics.py deleted file mode 100644 index d5198540..00000000 --- a/ceilometer/tests/unit/api/v2/test_statistics.py +++ /dev/null @@ -1,105 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Test statistics objects.""" - -import datetime - -from oslotest import base - -from ceilometer.api.controllers.v2 import meters - - -class TestStatisticsDuration(base.BaseTestCase): - - def setUp(self): - super(TestStatisticsDuration, self).setUp() - - # Create events relative to the range and pretend - # that the intervening events exist. 
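The datetime fixtures that follow (early/start/middle/end/late) exercise how the Statistics object clamps the reported duration to the queried time window: duration_start is raised to the window's start, duration_end is lowered to its end, and the duration is discarded entirely when the samples fall outside the window. A condensed, standalone sketch of that clamping rule, assuming plain datetimes rather than the wsme-typed Statistics fields:

    def clamp_duration(dur_start, dur_end, win_start, win_end):
        # No query window given: report the raw sample span unchanged.
        if win_start is None and win_end is None:
            return dur_start, dur_end
        start = max(dur_start, win_start)
        end = min(dur_end, win_end)
        # Span entirely outside the window (or inverted): nothing to report.
        if start > end:
            return None, None
        return start, end

With a span entirely after the window (the late1/late2 case) this yields (None, None), matching test_after_range; with no window it passes the raw span through, matching test_without_timestamp.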
- - self.early1 = datetime.datetime(2012, 8, 27, 7, 0) - self.early2 = datetime.datetime(2012, 8, 27, 17, 0) - - self.start = datetime.datetime(2012, 8, 28, 0, 0) - - self.middle1 = datetime.datetime(2012, 8, 28, 8, 0) - self.middle2 = datetime.datetime(2012, 8, 28, 18, 0) - - self.end = datetime.datetime(2012, 8, 28, 23, 59) - - self.late1 = datetime.datetime(2012, 8, 29, 9, 0) - self.late2 = datetime.datetime(2012, 8, 29, 19, 0) - - def test_nulls(self): - s = meters.Statistics(duration_start=None, - duration_end=None, - start_timestamp=None, - end_timestamp=None) - self.assertIsNone(s.duration_start) - self.assertIsNone(s.duration_end) - self.assertIsNone(s.duration) - - def test_overlap_range_start(self): - s = meters.Statistics(duration_start=self.early1, - duration_end=self.middle1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.start, s.duration_start) - self.assertEqual(self.middle1, s.duration_end) - self.assertEqual(8 * 60 * 60, s.duration) - - def test_within_range(self): - s = meters.Statistics(duration_start=self.middle1, - duration_end=self.middle2, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle1, s.duration_start) - self.assertEqual(self.middle2, s.duration_end) - self.assertEqual(10 * 60 * 60, s.duration) - - def test_within_range_zero_duration(self): - s = meters.Statistics(duration_start=self.middle1, - duration_end=self.middle1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle1, s.duration_start) - self.assertEqual(self.middle1, s.duration_end) - self.assertEqual(0, s.duration) - - def test_overlap_range_end(self): - s = meters.Statistics(duration_start=self.middle2, - duration_end=self.late1, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertEqual(self.middle2, s.duration_start) - self.assertEqual(self.end, s.duration_end) - self.assertEqual(((6 * 60) - 1) * 60, s.duration) - - def test_after_range(self): - s = meters.Statistics(duration_start=self.late1, - duration_end=self.late2, - start_timestamp=self.start, - end_timestamp=self.end) - self.assertIsNone(s.duration_start) - self.assertIsNone(s.duration_end) - self.assertIsNone(s.duration) - - def test_without_timestamp(self): - s = meters.Statistics(duration_start=self.late1, - duration_end=self.late2, - start_timestamp=None, - end_timestamp=None) - self.assertEqual(self.late1, s.duration_start) - self.assertEqual(self.late2, s.duration_end) diff --git a/ceilometer/tests/unit/compute/__init__.py b/ceilometer/tests/unit/compute/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/notifications/__init__.py b/ceilometer/tests/unit/compute/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/notifications/test_instance.py b/ceilometer/tests/unit/compute/notifications/test_instance.py deleted file mode 100644 index fdd8e512..00000000 --- a/ceilometer/tests/unit/compute/notifications/test_instance.py +++ /dev/null @@ -1,608 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for converters for producing compute counter messages from -notification events. -""" -from oslotest import base - -from ceilometer.compute.notifications import instance -from ceilometer import sample - - -INSTANCE_CREATE_END = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.create.end', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -INSTANCE_DELETE_START = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-fb3c4546-a2e5-49b7-9fd2-a63bd658bc39', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:24:14.547374', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'compute.instance.delete.start', - u'message_id': u'a15b94ee-cb8e-4c71-9abe-14aa80055fb4', - u'payload': {u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'deleting', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': 
u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:24:14.824743', -} - -INSTANCE_EXISTS = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - -INSTANCE_EXISTS_METADATA_LIST = { - u'_context_auth_token': None, - u'_context_is_admin': True, - u'_context_project_id': None, - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': None, - u'_context_request_id': u'req-659a8eb2-4372-4c01-9028-ad6e40b0ed22', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T16:03:43.760204', - u'_context_user_id': None, - u'event_type': u'compute.instance.exists', - u'message_id': u'4b884c03-756d-4c06-8b42-80b6def9d302', - u'payload': {u'audit_period_beginning': u'2012-05-08 15:00:00', - u'audit_period_ending': u'2012-05-08 16:00:00', - u'bandwidth': {}, - u'created_at': u'2012-05-07 22:16:18', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'3a513875-95c9-4012-a3e7-f90c678854e5', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-07 23:01:27', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'metadata': [], - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - 
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', - }, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 16:03:44.122481', -} - - -INSTANCE_FINISH_RESIZE_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-e3f71bb9-e9b9-418b-a9db-a5950c851b25', - u'_context_quota_class': None, - u'event_type': u'compute.instance.finish_resize.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:10:17.436974', - u'_context_is_admin': True, - u'message_id': u'a2f7770d-b85d-4797-ab10-41407a44368e', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:08:39.162612', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': {u'state_description': u'', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 5, - u'deleted_at': u'', - u'fixed_ips': [{u'floating_ips': [], - u'label': u'private', - u'version': 4, - u'meta': {}, - u'address': u'10.0.0.3', - u'type': u'fixed'}], - u'memory_mb': 2048, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'reservation_id': u'r-u3fvim06', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.923939', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 20, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 20, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.small', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_RESIZE_REVERT_END = { - u'_context_roles': [u'admin'], - u'_context_request_id': u'req-9da1d714-dabe-42fd-8baa-583e57cd4f1a', - u'_context_quota_class': None, - u'event_type': u'compute.instance.resize.revert.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2013-01-04 15:20:32.009532', - u'_context_is_admin': True, - u'message_id': u'c48deeba-d0c3-4154-b3db-47480b52267a', - u'_context_auth_token': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': u'cea4b25edb484e5392727181b7721d29', - u'_context_timestamp': u'2013-01-04T15:19:51.018218', - u'_context_read_deleted': u'no', - u'_context_user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'_context_remote_address': u'10.147.132.184', - u'publisher_id': u'compute.ip-10-147-132-184.ec2.internal', - u'payload': 
{u'state_description': u'resize_reverting', - u'availability_zone': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'deleted_at': u'', - u'reservation_id': u'r-u3fvim06', - u'memory_mb': 512, - u'user_id': u'01b83a5e23f24a6fb6cd073c0aee6eed', - u'hostname': u's1', - u'state': u'resized', - u'launched_at': u'2013-01-04T15:10:14.000000', - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_A-wste7', - u'metering.foo.bar': u'true'}, - u'ramdisk_id': u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'access_ip_v6': None, - u'disk_gb': 0, - u'access_ip_v4': None, - u'kernel_id': u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'host': u'ip-10-147-132-184.ec2.internal', - u'display_name': u's1', - u'image_ref_url': u'http://10.147.132.184:9292/images/' - 'a130b9d9-e00e-436e-9782-836ccef06e8a', - u'root_gb': 0, - u'tenant_id': u'cea4b25edb484e5392727181b7721d29', - u'created_at': u'2013-01-04T11:21:48.000000', - u'instance_id': u'648e8963-6886-4c3c-98f9-4511c292f86b', - u'instance_type': u'm1.tiny', - u'vcpus': 1, - u'image_meta': {u'kernel_id': - u'571478e0-d5e7-4c2e-95a5-2bc79443c28a', - u'ramdisk_id': - u'5f23128e-5525-46d8-bc66-9c30cd87141a', - u'base_image_ref': - u'a130b9d9-e00e-436e-9782-836ccef06e8a'}, - u'architecture': None, - u'os_type': None - }, - u'priority': u'INFO' -} - -INSTANCE_SCHEDULED = { - u'_context_request_id': u'req-f28a836a-32bf-4cc3-940a-3515878c181f', - u'_context_quota_class': None, - u'event_type': u'scheduler.run_instance.scheduled', - u'_context_service_catalog': [{ - u'endpoints': [{ - u'adminURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'region': u'RegionOne', - u'internalURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb', - u'id': u'30cb904fdc294eea9b225e06b2d0d4eb', - u'publicURL': - u'http://172.16.12.21:8776/v1/2bd766a095b44486bf07cf7f666997eb'}], - u'endpoints_links': [], - u'type': u'volume', - u'name': u'cinder'}], - u'_context_auth_token': u'TOK', - u'_context_user_id': u'0a757cd896b64b65ba3784afef564116', - u'payload': { - 'instance_id': 'fake-uuid1-1', - u'weighted_host': {u'host': u'eglynn-f19-devstack3', u'weight': 1.0}, - u'request_spec': { - u'num_instances': 1, - u'block_device_mapping': [{ - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'guest_format': None, - u'boot_index': 0, - u'no_device': None, - u'connection_info': None, - u'volume_id': None, - u'volume_size': None, - u'device_name': None, - u'disk_bus': None, - u'image_id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'source_type': u'image', - u'device_type': u'disk', - u'snapshot_id': None, - u'destination_type': u'local', - u'delete_on_termination': True}], - u'image': { - u'status': u'active', - u'name': u'cirros-0.3.1-x86_64-uec', - u'deleted': False, - u'container_format': u'ami', - u'created_at': u'2014-02-18T13:16:26.000000', - u'disk_format': u'ami', - u'updated_at': u'2014-02-18T13:16:27.000000', - u'properties': { - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275'}, - u'min_disk': 0, - u'min_ram': 0, - u'checksum': u'f8a2eeee2dc65b3d9b6e63678955bd83', - u'owner': u'2bd766a095b44486bf07cf7f666997eb', - u'is_public': True, - u'deleted_at': None, - u'id': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'size': 25165824}, - u'instance_type': { - u'root_gb': 1, - u'name': u'm1.tiny', - u'ephemeral_gb': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'extra_specs': {}, - u'swap': 0, - u'rxtx_factor': 1.0, - u'flavorid': u'1', - 
u'vcpu_weight': None, - u'id': 2}, - u'instance_properties': { - u'vm_state': u'building', - u'availability_zone': None, - u'terminated_at': None, - u'ephemeral_gb': 0, - u'instance_type_id': 2, - u'user_data': None, - u'cleaned': False, - u'vm_mode': None, - u'deleted_at': None, - u'reservation_id': u'r-ven5q6om', - u'id': 15, - u'security_groups': [{ - u'deleted_at': None, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'description': u'default', - u'deleted': False, - u'created_at': u'2014-02-19T11:02:31.000000', - u'updated_at': None, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'id': 1, - u'name': u'default'}], - u'disable_terminate': False, - u'root_device_name': None, - u'display_name': u'new', - u'uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'default_swap_device': None, - u'info_cache': { - u'instance_uuid': u'9206baae-c3b6-41bc-96f2-2c0726ff51c8', - u'deleted': False, - u'created_at': u'2014-03-05T12:44:00.000000', - u'updated_at': None, - u'network_info': [], - u'deleted_at': None}, - u'hostname': u'new', - u'launched_on': None, - u'display_description': u'new', - u'key_data': None, - u'deleted': False, - u'config_drive': u'', - u'power_state': 0, - u'default_ephemeral_device': None, - u'progress': 0, - u'project_id': u'2bd766a095b44486bf07cf7f666997eb', - u'launched_at': None, - u'scheduled_at': None, - u'node': None, - u'ramdisk_id': u'4999726c-545c-4a9e-bfc0-917459784275', - u'access_ip_v6': None, - u'access_ip_v4': None, - u'kernel_id': u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'key_name': None, - u'updated_at': None, - u'host': None, - u'root_gb': 1, - u'user_id': u'0a757cd896b64b65ba3784afef564116', - u'system_metadata': { - u'image_kernel_id': - u'c8794c1a-4158-42cc-9f97-d0d250c9c6a4', - u'image_min_disk': u'1', - u'instance_type_memory_mb': u'512', - u'instance_type_swap': u'0', - u'instance_type_vcpu_weight': None, - u'instance_type_root_gb': u'1', - u'instance_type_name': u'm1.tiny', - u'image_ramdisk_id': - u'4999726c-545c-4a9e-bfc0-917459784275', - u'instance_type_id': u'2', - u'instance_type_ephemeral_gb': u'0', - u'instance_type_rxtx_factor': u'1.0', - u'instance_type_flavorid': u'1', - u'instance_type_vcpus': u'1', - u'image_container_format': u'ami', - u'image_min_ram': u'0', - u'image_disk_format': u'ami', - u'image_base_image_ref': - u'0560ac3f-3bcd-434d-b012-8dd7a212b73b'}, - u'task_state': u'scheduling', - u'shutdown_terminate': False, - u'cell_name': None, - u'ephemeral_key_uuid': None, - u'locked': False, - u'name': u'instance-0000000f', - u'created_at': u'2014-03-05T12:44:00.000000', - u'locked_by': None, - u'launch_index': 0, - u'memory_mb': 512, - u'vcpus': 1, - u'image_ref': u'0560ac3f-3bcd-434d-b012-8dd7a212b73b', - u'architecture': None, - u'auto_disk_config': False, - u'os_type': None, - u'metadata': {u'metering.server_group': u'Group_A', - u'AutoScalingGroupName': u'tyky-Group_Awste7', - u'metering.foo.bar': u'true'}}, - u'security_group': [u'default'], - u'instance_uuids': [u'9206baae-c3b6-41bc-96f2-2c0726ff51c8']}}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_timestamp': u'2014-03-05T12:44:00.135674', - u'publisher_id': u'scheduler.eglynn-f19-devstack3', - u'message_id': u'd6c1ae63-a26b-47c7-8397-8794216e09dd', - u'_context_remote_address': u'172.16.12.21', - u'_context_roles': [u'_member_', u'admin'], - u'timestamp': u'2014-03-05 12:44:00.733758', - u'_context_user': u'0a757cd896b64b65ba3784afef564116', - u'_unique_id': u'2af47cbdde604ff794bb046f3f9db1e2', - u'_context_project_name': u'admin', - 
    u'_context_read_deleted': u'no',
-    u'_context_tenant': u'2bd766a095b44486bf07cf7f666997eb',
-    u'_context_instance_lock_checked': False,
-    u'_context_project_id': u'2bd766a095b44486bf07cf7f666997eb',
-    u'_context_user_name': u'admin'
-}
-
-
-class TestNotifications(base.BaseTestCase):
-
-    def test_process_notification(self):
-        info = list(instance.Instance(None).process_notification(
-            INSTANCE_CREATE_END
-        ))[0]
-        for name, actual, expected in [
-                ('counter_name', info.name, 'instance'),
-                ('counter_type', info.type, sample.TYPE_GAUGE),
-                ('counter_volume', info.volume, 1),
-                ('timestamp', info.timestamp,
-                 INSTANCE_CREATE_END['timestamp']),
-                ('resource_id', info.resource_id,
-                 INSTANCE_CREATE_END['payload']['instance_id']),
-                ('instance_type_id',
-                 info.resource_metadata['instance_type_id'],
-                 INSTANCE_CREATE_END['payload']['instance_type_id']),
-                ('host', info.resource_metadata['host'],
-                 INSTANCE_CREATE_END['publisher_id']),
-        ]:
-            self.assertEqual(expected, actual, name)
-
-    @staticmethod
-    def _find_counter(counters, name):
-        # a list comprehension rather than filter(), which returns a
-        # non-subscriptable iterator on Python 3
-        return [counter for counter in counters
-                if counter.name == name][0]
-
-    def _verify_user_metadata(self, metadata):
-        self.assertIn('user_metadata', metadata)
-        user_meta = metadata['user_metadata']
-        self.assertEqual('Group_A', user_meta.get('server_group'))
-        self.assertNotIn('AutoScalingGroupName', user_meta)
-        self.assertIn('foo_bar', user_meta)
-        self.assertNotIn('foo.bar', user_meta)
-
-    def test_instance_create_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_CREATE_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(1, c.volume)
-
-    def test_instance_exists_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_EXISTS))
-        self.assertEqual(1, len(counters))
-
-    def test_instance_exists_metadata_list(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_EXISTS_METADATA_LIST))
-        self.assertEqual(1, len(counters))
-
-    def test_instance_delete_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_DELETE_START))
-        self.assertEqual(1, len(counters))
-
-    def test_instance_finish_resize_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_FINISH_RESIZE_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(1, c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_resize_revert_instance(self):
-        ic = instance.Instance(None)
-        counters = list(ic.process_notification(INSTANCE_RESIZE_REVERT_END))
-        self.assertEqual(1, len(counters))
-        c = counters[0]
-        self.assertEqual(1, c.volume)
-        self._verify_user_metadata(c.resource_metadata)
-
-    def test_instance_scheduled(self):
-        ic = instance.InstanceScheduled(None)
-
-        self.assertIn(INSTANCE_SCHEDULED['event_type'],
-                      ic.event_types)
-
-        counters = list(ic.process_notification(INSTANCE_SCHEDULED))
-        self.assertEqual(1, len(counters))
-        names = [c.name for c in counters]
-        self.assertEqual(['instance.scheduled'], names)
-        rid = [c.resource_id for c in counters]
-        self.assertEqual(['fake-uuid1-1'], rid)
diff --git a/ceilometer/tests/unit/compute/pollsters/__init__.py b/ceilometer/tests/unit/compute/pollsters/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/tests/unit/compute/pollsters/base.py b/ceilometer/tests/unit/compute/pollsters/base.py
deleted file mode 100644
index 95fd86b2..00000000
--- a/ceilometer/tests/unit/compute/pollsters/base.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslotest import mockpatch
-
-import ceilometer.tests.base as base
-
-
-class TestPollsterBase(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestPollsterBase, self).setUp()
-
-        self.inspector = mock.Mock()
-        self.instance = mock.MagicMock()
-        self.instance.name = 'instance-00000001'
-        setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
-                self.instance.name)
-        setattr(self.instance, 'OS-EXT-STS:vm_state',
-                'active')
-        self.instance.id = 1
-        self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
-                                'ram': 512, 'disk': 20, 'ephemeral': 0}
-        self.instance.status = 'active'
-        self.instance.metadata = {
-            'fqdn': 'vm_fqdn',
-            'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128',
-            'project_cos': 'dev'}
-
-        patch_virt = mockpatch.Patch(
-            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
-            new=mock.Mock(return_value=self.inspector))
-        self.useFixture(patch_virt)
-
-        # the hypervisor inspector is a lazily created singleton cached on
-        # the base compute pollster class, so the class property itself
-        # must be mocked to avoid sharing inspector state between tests
-        patch_inspector = mockpatch.Patch(
-            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
-            self.inspector)
-        self.useFixture(patch_inspector)
diff --git a/ceilometer/tests/unit/compute/pollsters/test_cpu.py b/ceilometer/tests/unit/compute/pollsters/test_cpu.py
deleted file mode 100644
index bfc3f729..00000000
--- a/ceilometer/tests/unit/compute/pollsters/test_cpu.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
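A note on the class-property patch above: because the base pollster resolves its hypervisor inspector lazily and caches it at class scope, an instance-level mock would be captured by the first test to run and then leak into every later one. A minimal, self-contained sketch of that failure mode (hypothetical names, not the removed ceilometer code):

    class CachedInspectorBase(object):
        _inspector = None

        @property
        def inspector(self):
            # resolved once, then shared by every instance of every
            # subclass: whichever caller triggers the first lookup wins
            if CachedInspectorBase._inspector is None:
                CachedInspectorBase._inspector = object()
            return CachedInspectorBase._inspector


    first, second = CachedInspectorBase(), CachedInspectorBase()
    assert first.inspector is second.inspector

Patching the inspector property itself, as mockpatch.Patch does above, bypasses the cached value entirely and keeps each test isolated.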
- -import time - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import cpu -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.tests.unit.compute.pollsters import base - - -class TestCPUPollster(base.TestPollsterBase): - - def setUp(self): - super(TestCPUPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.CPUStats(time=1 * (10 ** 6), number=2), - virt_inspector.CPUStats(time=3 * (10 ** 6), number=2), - # cpu_time resets on instance restart - virt_inspector.CPUStats(time=2 * (10 ** 6), number=2), - )) - - def inspect_cpus(name): - return next(next_value) - - self.inspector.inspect_cpus = mock.Mock(side_effect=inspect_cpus) - - mgr = manager.AgentManager() - pollster = cpu.CPUPollster() - - def _verify_cpu_metering(expected_time): - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(set(['cpu']), set([s.name for s in samples])) - self.assertEqual(expected_time, samples[0].volume) - self.assertEqual(2, samples[0].resource_metadata.get('cpu_number')) - # ensure elapsed time between polling cycles is non-zero - time.sleep(0.001) - - _verify_cpu_metering(1 * (10 ** 6)) - _verify_cpu_metering(3 * (10 ** 6)) - _verify_cpu_metering(2 * (10 ** 6)) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_no_caching(self): - cpu_stats = virt_inspector.CPUStats(time=1 * (10 ** 6), number=2) - self.inspector.inspect_cpus = mock.Mock(return_value=cpu_stats) - - mgr = manager.AgentManager() - pollster = cpu.CPUPollster() - - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(10 ** 6, samples[0].volume) - self.assertEqual(0, len(cache)) - - -class TestCPUUtilPollster(base.TestPollsterBase): - - def setUp(self): - super(TestCPUUtilPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.CPUUtilStats(util=40), - virt_inspector.CPUUtilStats(util=60), - )) - - def inspect_cpu_util(name, duration): - return next(next_value) - - self.inspector.inspect_cpu_util = (mock. - Mock(side_effect=inspect_cpu_util)) - - mgr = manager.AgentManager() - pollster = cpu.CPUUtilPollster() - - def _verify_cpu_util_metering(expected_util): - cache = {} - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual(set(['cpu_util']), - set([s.name for s in samples])) - self.assertEqual(expected_util, samples[0].volume) - - _verify_cpu_util_metering(40) - _verify_cpu_util_metering(60) diff --git a/ceilometer/tests/unit/compute/pollsters/test_diskio.py b/ceilometer/tests/unit/compute/pollsters/test_diskio.py deleted file mode 100644 index 0ecafc0f..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_diskio.py +++ /dev/null @@ -1,361 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# Copyright 2014 Cisco Systems, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import mock
-from oslotest import mockpatch
-
-from ceilometer.agent import manager
-from ceilometer.compute.pollsters import disk
-from ceilometer.compute.virt import inspector as virt_inspector
-import ceilometer.tests.base as base
-
-
-class TestBaseDiskIO(base.BaseTestCase):
-
-    TYPE = 'cumulative'
-
-    def setUp(self):
-        super(TestBaseDiskIO, self).setUp()
-
-        self.inspector = mock.Mock()
-        self.instance = self._get_fake_instances()
-        patch_virt = mockpatch.Patch(
-            'ceilometer.compute.virt.inspector.get_hypervisor_inspector',
-            new=mock.Mock(return_value=self.inspector))
-        self.useFixture(patch_virt)
-
-        # the hypervisor inspector is a lazily created singleton cached on
-        # the base compute pollster class, so the class property itself
-        # must be mocked to avoid sharing inspector state between tests
-        patch_inspector = mockpatch.Patch(
-            'ceilometer.compute.pollsters.BaseComputePollster.inspector',
-            self.inspector)
-        self.useFixture(patch_inspector)
-
-    @staticmethod
-    def _get_fake_instances():
-        instances = []
-        for i in [1, 2]:
-            instance = mock.MagicMock()
-            instance.name = 'instance-%s' % i
-            setattr(instance, 'OS-EXT-SRV-ATTR:instance_name',
-                    instance.name)
-            instance.id = i
-            instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
-                               'ram': 512, 'disk': 20, 'ephemeral': 0}
-            instance.status = 'active'
-            instances.append(instance)
-        return instances
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def _check_get_samples(self, factory, name, expected_count=2):
-        pollster = factory()
-
-        mgr = manager.AgentManager()
-        cache = {}
-        samples = list(pollster.get_samples(mgr, cache, self.instance))
-        self.assertIsNotEmpty(samples)
-        cache_key = getattr(pollster, self.CACHE_KEY)
-        self.assertIn(cache_key, cache)
-        for instance in self.instance:
-            self.assertIn(instance.id, cache[cache_key])
-        self.assertEqual(set([name]), set([s.name for s in samples]))
-
-        match = [s for s in samples if s.name == name]
-        self.assertEqual(len(match), expected_count,
-                         'missing counter %s' % name)
-        return match
-
-    def _check_aggregate_samples(self, factory, name,
-                                 expected_volume,
-                                 expected_device=None):
-        match = self._check_get_samples(factory, name)
-        self.assertEqual(expected_volume, match[0].volume)
-        self.assertEqual(self.TYPE, match[0].type)
-        if expected_device is not None:
-            self.assertEqual(set(expected_device),
-                             set(match[0].resource_metadata.get('device')))
-        instances = [i.id for i in self.instance]
-        for m in match:
-            self.assertIn(m.resource_id, instances)
-
-    def _check_per_device_samples(self, factory, name,
-                                  expected_volume,
-                                  expected_device=None):
-        match = self._check_get_samples(factory, name, expected_count=4)
-        match_dict = {}
-        for m in match:
-            match_dict[m.resource_id] = m
-        for instance in self.instance:
-            key = "%s-%s" % (instance.id, expected_device)
-            self.assertEqual(expected_volume,
-                             match_dict[key].volume)
-            self.assertEqual(self.TYPE, match_dict[key].type)
-
-            self.assertEqual(key, match_dict[key].resource_id)
-
-
-class TestDiskPollsters(TestBaseDiskIO):
-
-    DISKS = [ -
(virt_inspector.Disk(device='vda1'), - virt_inspector.DiskStats(read_bytes=1, read_requests=2, - write_bytes=3, write_requests=4, - errors=-1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskStats(read_bytes=2, read_requests=3, - write_bytes=5, write_requests=7, - errors=-1)), - ] - CACHE_KEY = "CACHE_KEY_DISK" - - def setUp(self): - super(TestDiskPollsters, self).setUp() - self.inspector.inspect_disks = mock.Mock(return_value=self.DISKS) - - def test_disk_read_requests(self): - self._check_aggregate_samples(disk.ReadRequestsPollster, - 'disk.read.requests', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_read_bytes(self): - self._check_aggregate_samples(disk.ReadBytesPollster, - 'disk.read.bytes', 3, - expected_device=['vda1', 'vda2']) - - def test_disk_write_requests(self): - self._check_aggregate_samples(disk.WriteRequestsPollster, - 'disk.write.requests', 11, - expected_device=['vda1', 'vda2']) - - def test_disk_write_bytes(self): - self._check_aggregate_samples(disk.WriteBytesPollster, - 'disk.write.bytes', 8, - expected_device=['vda1', 'vda2']) - - def test_per_disk_read_requests(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadRequestsPollster, - 'disk.device.read.requests', 3, - 'vda2') - - def test_per_disk_write_requests(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 4, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsPollster, - 'disk.device.write.requests', 7, - 'vda2') - - def test_per_disk_read_bytes(self): - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 1, - 'vda1') - self._check_per_device_samples(disk.PerDeviceReadBytesPollster, - 'disk.device.read.bytes', 2, - 'vda2') - - def test_per_disk_write_bytes(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 3, - 'vda1') - self._check_per_device_samples(disk.PerDeviceWriteBytesPollster, - 'disk.device.write.bytes', 5, - 'vda2') - - -class TestDiskRatePollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskRateStats(1024, 300, 5120, 700)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskRateStats(2048, 400, 6144, 800)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_RATE" - - def setUp(self): - super(TestDiskRatePollsters, self).setUp() - self.inspector.inspect_disk_rates = mock.Mock(return_value=self.DISKS) - - def test_disk_read_bytes_rate(self): - self._check_aggregate_samples(disk.ReadBytesRatePollster, - 'disk.read.bytes.rate', 3072, - expected_device=['disk1', 'disk2']) - - def test_disk_read_requests_rate(self): - self._check_aggregate_samples(disk.ReadRequestsRatePollster, - 'disk.read.requests.rate', 700, - expected_device=['disk1', 'disk2']) - - def test_disk_write_bytes_rate(self): - self._check_aggregate_samples(disk.WriteBytesRatePollster, - 'disk.write.bytes.rate', 11264, - expected_device=['disk1', 'disk2']) - - def test_disk_write_requests_rate(self): - self._check_aggregate_samples(disk.WriteRequestsRatePollster, - 'disk.write.requests.rate', 1500, - expected_device=['disk1', 'disk2']) - - def test_per_disk_read_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 'disk.device.read.bytes.rate', - 1024, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadBytesRatePollster, - 
'disk.device.read.bytes.rate', - 2048, 'disk2') - - def test_per_disk_read_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 300, 'disk1') - self._check_per_device_samples(disk.PerDeviceReadRequestsRatePollster, - 'disk.device.read.requests.rate', - 400, 'disk2') - - def test_per_disk_write_bytes_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', - 5120, 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteBytesRatePollster, - 'disk.device.write.bytes.rate', 6144, - 'disk2') - - def test_per_disk_write_requests_rate(self): - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 700, - 'disk1') - self._check_per_device_samples(disk.PerDeviceWriteRequestsRatePollster, - 'disk.device.write.requests.rate', 800, - 'disk2') - - -class TestDiskLatencyPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskLatencyStats(1000)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskLatencyStats(2000)) - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_LATENCY" - - def setUp(self): - super(TestDiskLatencyPollsters, self).setUp() - self.inspector.inspect_disk_latency = mock.Mock( - return_value=self.DISKS) - - def test_disk_latency(self): - self._check_aggregate_samples(disk.DiskLatencyPollster, - 'disk.latency', 3) - - def test_per_device_latency(self): - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 1, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskLatencyPollster, - 'disk.device.latency', 2, 'disk2') - - -class TestDiskIOPSPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='disk1'), - virt_inspector.DiskIOPSStats(10)), - - (virt_inspector.Disk(device='disk2'), - virt_inspector.DiskIOPSStats(20)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_IOPS" - - def setUp(self): - super(TestDiskIOPSPollsters, self).setUp() - self.inspector.inspect_disk_iops = mock.Mock(return_value=self.DISKS) - - def test_disk_iops(self): - self._check_aggregate_samples(disk.DiskIOPSPollster, - 'disk.iops', 30) - - def test_per_device_iops(self): - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 10, 'disk1') - - self._check_per_device_samples(disk.PerDeviceDiskIOPSPollster, - 'disk.device.iops', 20, 'disk2') - - -class TestDiskInfoPollsters(TestBaseDiskIO): - - DISKS = [ - (virt_inspector.Disk(device='vda1'), - virt_inspector.DiskInfo(capacity=3, allocation=2, physical=1)), - (virt_inspector.Disk(device='vda2'), - virt_inspector.DiskInfo(capacity=4, allocation=3, physical=2)), - ] - TYPE = 'gauge' - CACHE_KEY = "CACHE_KEY_DISK_INFO" - - def setUp(self): - super(TestDiskInfoPollsters, self).setUp() - self.inspector.inspect_disk_info = mock.Mock(return_value=self.DISKS) - - def test_disk_capacity(self): - self._check_aggregate_samples(disk.CapacityPollster, - 'disk.capacity', 7, - expected_device=['vda1', 'vda2']) - - def test_disk_allocation(self): - self._check_aggregate_samples(disk.AllocationPollster, - 'disk.allocation', 5, - expected_device=['vda1', 'vda2']) - - def test_disk_physical(self): - self._check_aggregate_samples(disk.PhysicalPollster, - 'disk.usage', 3, - expected_device=['vda1', 'vda2']) - - def test_per_disk_capacity(self): - self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 3, - 'vda1') - 
self._check_per_device_samples(disk.PerDeviceCapacityPollster, - 'disk.device.capacity', 4, - 'vda2') - - def test_per_disk_allocation(self): - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 2, - 'vda1') - self._check_per_device_samples(disk.PerDeviceAllocationPollster, - 'disk.device.allocation', 3, - 'vda2') - - def test_per_disk_physical(self): - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 1, - 'vda1') - self._check_per_device_samples(disk.PerDevicePhysicalPollster, - 'disk.device.usage', 2, - 'vda2') diff --git a/ceilometer/tests/unit/compute/pollsters/test_instance.py b/ceilometer/tests/unit/compute/pollsters/test_instance.py deleted file mode 100644 index f100f543..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_instance.py +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as fixture_config - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import instance as pollsters_instance -from ceilometer.tests.unit.compute.pollsters import base - - -class TestInstancePollster(base.TestPollsterBase): - - def setUp(self): - super(TestInstancePollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples_instance(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(1, len(samples)) - self.assertEqual('instance', samples[0].name) - self.assertEqual(1, samples[0].resource_metadata['vcpus']) - self.assertEqual(512, samples[0].resource_metadata['memory_mb']) - self.assertEqual(20, samples[0].resource_metadata['disk_gb']) - self.assertEqual(20, samples[0].resource_metadata['root_gb']) - self.assertEqual(0, samples[0].resource_metadata['ephemeral_gb']) - self.assertEqual('active', samples[0].resource_metadata['status']) - self.assertEqual('active', samples[0].resource_metadata['state']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_keys(self): - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.set_override('reserved_metadata_keys', ['fqdn']) - - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'fqdn': 'vm_fqdn', - 'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - samples[0].resource_metadata['user_metadata']) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_reserved_metadata_with_namespace(self): - mgr = manager.AgentManager() - pollster = pollsters_instance.InstancePollster() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual({'stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128'}, - 
                         samples[0].resource_metadata['user_metadata'])
-
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.CONF.set_override('reserved_metadata_namespace', [])
-        mgr = manager.AgentManager()
-        pollster = pollsters_instance.InstancePollster()
-        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
-        self.assertNotIn('user_metadata', samples[0].resource_metadata)
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def test_get_flavor_name_as_metadata_instance_type(self):
-        mgr = manager.AgentManager()
-        pollster = pollsters_instance.InstancePollster()
-        samples = list(pollster.get_samples(mgr, {}, [self.instance]))
-        self.assertEqual(1, len(samples))
-        self.assertEqual('m1.small',
-                         samples[0].resource_metadata['instance_type'])
diff --git a/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py b/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py
deleted file mode 100644
index f557a415..00000000
--- a/ceilometer/tests/unit/compute/pollsters/test_location_metadata.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#
-# Copyright 2012 eNovance
-# Copyright 2012 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for the compute pollsters.
-"""
-
-import mock
-from oslotest import base
-import six
-
-from ceilometer.agent import manager
-from ceilometer.compute.pollsters import util
-
-
-class FauxInstance(object):
-
-    def __init__(self, **kwds):
-        for name, value in kwds.items():
-            setattr(self, name, value)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def get(self, key, default):
-        try:
-            return getattr(self, key)
-        except AttributeError:
-            return default
-
-
-class TestLocationMetadata(base.BaseTestCase):
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def setUp(self):
-        self.manager = manager.AgentManager()
-        super(TestLocationMetadata, self).setUp()
-
-        # Mimics an instance returned from nova api call
-        self.INSTANCE_PROPERTIES = {'name': 'display name',
-                                    'id': ('234cbe81-4e09-4f64-9b2a-'
-                                           '714f6b9046e3'),
-                                    'OS-EXT-SRV-ATTR:instance_name':
-                                    'instance-000001',
-                                    'OS-EXT-AZ:availability_zone':
-                                    'foo-zone',
-                                    'reservation_id': 'reservation id',
-                                    'architecture': 'x86_64',
-                                    'kernel_id': 'kernel id',
-                                    'os_type': 'linux',
-                                    'ramdisk_id': 'ramdisk id',
-                                    'status': 'active',
-                                    'ephemeral_gb': 0,
-                                    'root_gb': 20,
-                                    'disk_gb': 20,
-                                    'image': {'id': 1,
-                                              'links': [{"rel": "bookmark",
-                                                         'href': 2}]},
-                                    'hostId': '1234-5678',
-                                    'OS-EXT-SRV-ATTR:host': 'host-test',
-                                    'flavor': {'name': 'm1.tiny',
-                                               'id': 1,
-                                               'disk': 20,
-                                               'ram': 512,
-                                               'vcpus': 2,
-                                               'ephemeral': 0},
-                                    'metadata': {'metering.autoscale.group':
-                                                 'X' * 512,
-                                                 'metering.ephemeral_gb': 42}}
-
-        self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
-
-    def test_metadata(self):
-        md = util._get_metadata_from_object(self.instance)
-        for prop, value in six.iteritems(self.INSTANCE_PROPERTIES):
-            # a one-element tuple, not a parenthesized string, so the
-            # membership test really compares whole key names
-            if prop not in ("metadata",):
-                # Special cases
-                if prop == 'name':
-                    prop = 'display_name'
-                elif prop == 'hostId':
-                    prop = "host"
- elif prop == 'OS-EXT-SRV-ATTR:host': - prop = "instance_host" - elif prop == 'OS-EXT-SRV-ATTR:instance_name': - prop = 'name' - elif prop == "id": - prop = "instance_id" - self.assertEqual(value, md[prop]) - user_metadata = md['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(1, len(user_metadata)) - - def test_metadata_empty_image(self): - self.INSTANCE_PROPERTIES['image'] = None - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertIsNone(md['image']) - self.assertIsNone(md['image_ref']) - self.assertIsNone(md['image_ref_url']) - - def test_metadata_image_through_conductor(self): - # There should be no links here, should default to None - self.INSTANCE_PROPERTIES['image'] = {'id': 1} - self.instance = FauxInstance(**self.INSTANCE_PROPERTIES) - md = util._get_metadata_from_object(self.instance) - self.assertEqual(1, md['image_ref']) - self.assertIsNone(md['image_ref_url']) diff --git a/ceilometer/tests/unit/compute/pollsters/test_memory.py b/ceilometer/tests/unit/compute/pollsters/test_memory.py deleted file mode 100644 index 7576e1de..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_memory.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
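The test_metadata assertions above pin down how reserved user metadata was built: only metering.-prefixed keys survive, dots in the trimmed key become underscores, and string values are clamped to 256 characters; the disappearance of metering.ephemeral_gb while len(user_metadata) is 1 further suggests that names colliding with existing resource metadata are skipped. A sketch of that rule under those assumptions, with a hypothetical helper standing in for the removed ceilometer.compute utility code:

    def _reserved_user_metadata(instance_metadata, resource_metadata,
                                prefix='metering.', limit=256):
        # keep 'metering.' keys, rewrite '.' to '_' in the trimmed name,
        # clamp string values to `limit`, and skip names already present
        # in the resource metadata (inferred from the assertions above)
        out = {}
        for key, value in instance_metadata.items():
            if not key.startswith(prefix):
                continue
            name = key[len(prefix):].replace('.', '_')
            if name in resource_metadata:
                continue
            out[name] = value[:limit] if isinstance(value, str) else value
        return out

    md = _reserved_user_metadata(
        {'metering.autoscale.group': 'X' * 512, 'metering.ephemeral_gb': 42},
        {'ephemeral_gb': 0})
    assert list(md) == ['autoscale_group']
    assert len(md['autoscale_group']) == 256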
- -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import memory -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.tests.unit.compute.pollsters import base - - -class TestMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryUsageStats(usage=1.0), - virt_inspector.MemoryUsageStats(usage=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_usage(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryUsageStats): - return value - else: - raise value - - self.inspector.inspect_memory_usage = mock.Mock( - side_effect=inspect_memory_usage) - - mgr = manager.AgentManager() - pollster = memory.MemoryUsagePollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_memory_metering(expected_count, expected_memory_mb, - expected_warnings, mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.usage']), - set([s.name for s in samples])) - self.assertEqual(expected_memory_mb, samples[0].volume) - else: - self.assertEqual(expected_warnings, mylog.warning.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_memory_metering(1, 1.0, 0) - _verify_memory_metering(1, 2.0, 0) - _verify_memory_metering(0, 0, 1) - _verify_memory_metering(0, 0, 0) - - -class TestResidentMemoryPollster(base.TestPollsterBase): - - def setUp(self): - super(TestResidentMemoryPollster, self).setUp() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - next_value = iter(( - virt_inspector.MemoryResidentStats(resident=1.0), - virt_inspector.MemoryResidentStats(resident=2.0), - virt_inspector.NoDataException(), - virt_inspector.InstanceShutOffException(), - )) - - def inspect_memory_resident(instance, duration): - value = next(next_value) - if isinstance(value, virt_inspector.MemoryResidentStats): - return value - else: - raise value - - self.inspector.inspect_memory_resident = mock.Mock( - side_effect=inspect_memory_resident) - - mgr = manager.AgentManager() - pollster = memory.MemoryResidentPollster() - - @mock.patch('ceilometer.compute.pollsters.memory.LOG') - def _verify_resident_memory_metering(expected_count, - expected_resident_memory_mb, - expected_warnings, mylog): - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(expected_count, len(samples)) - if expected_count > 0: - self.assertEqual(set(['memory.resident']), - set([s.name for s in samples])) - self.assertEqual(expected_resident_memory_mb, - samples[0].volume) - else: - self.assertEqual(expected_warnings, mylog.warning.call_count) - self.assertEqual(0, mylog.exception.call_count) - - _verify_resident_memory_metering(1, 1.0, 0) - _verify_resident_memory_metering(1, 2.0, 0) - _verify_resident_memory_metering(0, 0, 1) - _verify_resident_memory_metering(0, 0, 0) diff --git a/ceilometer/tests/unit/compute/pollsters/test_net.py b/ceilometer/tests/unit/compute/pollsters/test_net.py deleted file mode 100644 index d78a2ec3..00000000 --- a/ceilometer/tests/unit/compute/pollsters/test_net.py +++ /dev/null @@ -1,318 +0,0 @@ -# -# Copyright 2012 eNovance -# Copyright 2012 Red Hat, 
Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from ceilometer.agent import manager -from ceilometer.compute.pollsters import net -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer import sample -from ceilometer.tests.unit.compute.pollsters import base - - -class FauxInstance(object): - - def __init__(self, **kwargs): - for name, value in kwargs.items(): - setattr(self, name, value) - - def __getitem__(self, key): - return getattr(self, key) - - def get(self, key, default): - return getattr(self, key, default) - - -class TestNetPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceStats(rx_bytes=5, rx_packets=6, - tx_bytes=7, tx_packets=8) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceStats(rx_bytes=9, rx_packets=10, - tx_bytes=11, tx_packets=12) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnics = mock.Mock(return_value=vnics) - - self.INSTANCE_PROPERTIES = {'name': 'display name', - 'OS-EXT-SRV-ATTR:instance_name': - 'instance-000001', - 'OS-EXT-AZ:availability_zone': 'foo-zone', - 'reservation_id': 'reservation id', - 'id': 'instance id', - 'user_id': 'user id', - 'tenant_id': 'tenant id', - 'architecture': 'x86_64', - 'kernel_id': 'kernel id', - 'os_type': 'linux', - 'ramdisk_id': 'ramdisk id', - 'status': 'active', - 'ephemeral_gb': 0, - 'root_gb': 20, - 'disk_gb': 20, - 'image': {'id': 1, - 'links': [{"rel": "bookmark", - 'href': 2}]}, - 'hostId': '1234-5678', - 'OS-EXT-SRV-ATTR:host': 'host-test', - 'flavor': {'disk': 20, - 'ram': 512, - 'name': 'tiny', - 'vcpus': 2, - 'ephemeral': 0}, - 'metadata': {'metering.autoscale.group': - 'X' * 512, - 'metering.foobar': 42}} - - self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in 
samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(len(match), 1, 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('cumulative', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesPollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 5, self.vnic1.fref), - ('192.168.0.4', 9, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesPollster, - [('10.0.0.2', 3, self.vnic0.fref), - ('192.168.0.3', 7, self.vnic1.fref), - ('192.168.0.4', 11, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_incoming_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingPacketsPollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 6, self.vnic1.fref), - ('192.168.0.4', 10, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_packets(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingPacketsPollster, - [('10.0.0.2', 4, self.vnic0.fref), - ('192.168.0.3', 8, self.vnic1.fref), - ('192.168.0.4', 12, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_metadata(self): - factory = net.OutgoingBytesPollster - pollster = factory() - sm = pollster.make_vnic_sample(self.faux_instance, - name='network.outgoing.bytes', - type=sample.TYPE_CUMULATIVE, - unit='B', - volume=100, - vnic_data=self.vnic0) - - user_metadata = sm.resource_metadata['user_metadata'] - expected = self.INSTANCE_PROPERTIES[ - 'metadata']['metering.autoscale.group'][:256] - self.assertEqual(expected, user_metadata['autoscale_group']) - self.assertEqual(2, len(user_metadata)) - - -class TestNetPollsterCache(base.TestPollsterBase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples_cache(self, factory): - vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceStats(rx_bytes=1, rx_packets=2, - tx_bytes=3, tx_packets=4) - vnics = [(vnic0, stats0)] - - mgr = manager.AgentManager() - pollster = factory() - cache = { - pollster.CACHE_KEY_VNIC: { - self.instance.id: vnics, - }, - } - samples = list(pollster.get_samples(mgr, cache, [self.instance])) - self.assertEqual(1, len(samples)) - - def test_incoming_bytes(self): - self._check_get_samples_cache(net.IncomingBytesPollster) - - def test_outgoing_bytes(self): - self._check_get_samples_cache(net.OutgoingBytesPollster) - - def test_incoming_packets(self): - self._check_get_samples_cache(net.IncomingPacketsPollster) - - def test_outgoing_packets(self): - self._check_get_samples_cache(net.OutgoingPacketsPollster) - - -class TestNetRatesPollster(base.TestPollsterBase): - - def setUp(self): - super(TestNetRatesPollster, self).setUp() - self.vnic0 = virt_inspector.Interface( - name='vnet0', - fref='fa163e71ec6e', - 
mac='fa:16:3e:71:ec:6d', - parameters=dict(ip='10.0.0.2', - projmask='255.255.255.0', - projnet='proj1', - dhcp_server='10.0.0.1')) - stats0 = virt_inspector.InterfaceRateStats(rx_bytes_rate=1, - tx_bytes_rate=2) - self.vnic1 = virt_inspector.Interface( - name='vnet1', - fref='fa163e71ec6f', - mac='fa:16:3e:71:ec:6e', - parameters=dict(ip='192.168.0.3', - projmask='255.255.255.0', - projnet='proj2', - dhcp_server='10.0.0.2')) - stats1 = virt_inspector.InterfaceRateStats(rx_bytes_rate=3, - tx_bytes_rate=4) - self.vnic2 = virt_inspector.Interface( - name='vnet2', - fref=None, - mac='fa:18:4e:72:fc:7e', - parameters=dict(ip='192.168.0.4', - projmask='255.255.255.0', - projnet='proj3', - dhcp_server='10.0.0.3')) - stats2 = virt_inspector.InterfaceRateStats(rx_bytes_rate=5, - tx_bytes_rate=6) - - vnics = [ - (self.vnic0, stats0), - (self.vnic1, stats1), - (self.vnic2, stats2), - ] - self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics) - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, expected): - mgr = manager.AgentManager() - pollster = factory() - samples = list(pollster.get_samples(mgr, {}, [self.instance])) - self.assertEqual(3, len(samples)) # one for each nic - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def _verify_vnic_metering(ip, expected_volume, expected_rid): - match = [s for s in samples - if s.resource_metadata['parameters']['ip'] == ip - ] - self.assertEqual(1, len(match), 'missing ip %s' % ip) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual('gauge', match[0].type) - self.assertEqual(expected_rid, match[0].resource_id) - - for ip, volume, rid in expected: - _verify_vnic_metering(ip, volume, rid) - - def test_incoming_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.IncomingBytesRatePollster, - [('10.0.0.2', 1, self.vnic0.fref), - ('192.168.0.3', 3, self.vnic1.fref), - ('192.168.0.4', 5, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) - - def test_outgoing_bytes_rate(self): - instance_name_id = "%s-%s" % (self.instance.name, self.instance.id) - self._check_get_samples( - net.OutgoingBytesRatePollster, - [('10.0.0.2', 2, self.vnic0.fref), - ('192.168.0.3', 4, self.vnic1.fref), - ('192.168.0.4', 6, - "%s-%s" % (instance_name_id, self.vnic2.name)), - ], - ) diff --git a/ceilometer/tests/unit/compute/test_discovery.py b/ceilometer/tests/unit/compute/test_discovery.py deleted file mode 100644 index da5b0488..00000000 --- a/ceilometer/tests/unit/compute/test_discovery.py +++ /dev/null @@ -1,99 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
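One convention runs through all of the network pollster tests above: a vNIC sample's resource id is the interface's nova filter reference when one exists, and otherwise a composite of instance name, instance id, and interface name (vnet2 has fref=None in the fixtures, hence the "%s-%s" expressions). A one-function sketch of that convention, using a hypothetical helper rather than the removed pollster code:

    def _vnic_resource_id(instance, vnic):
        # e.g. 'nova-instance-00000001-fa163e71ec6e' when a filter
        # reference is present; 'instance-00000001-1-vnet2' otherwise
        if vnic.fref is not None:
            return vnic.fref
        return '%s-%s-%s' % (instance.name, instance.id, vnic.name)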
-import datetime
-
-import iso8601
-import mock
-from oslo_config import fixture as fixture_config
-from oslotest import mockpatch
-
-from ceilometer.compute import discovery
-import ceilometer.tests.base as base
-
-
-class TestDiscovery(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestDiscovery, self).setUp()
-
-        self.instance = mock.MagicMock()
-        self.instance.name = 'instance-00000001'
-        setattr(self.instance, 'OS-EXT-SRV-ATTR:instance_name',
-                self.instance.name)
-        setattr(self.instance, 'OS-EXT-STS:vm_state',
-                'active')
-        self.instance.id = 1
-        self.instance.flavor = {'name': 'm1.small', 'id': 2, 'vcpus': 1,
-                                'ram': 512, 'disk': 20, 'ephemeral': 0}
-        self.instance.status = 'active'
-        self.instance.metadata = {
-            'fqdn': 'vm_fqdn',
-            'metering.stack': '2cadc4b4-8789-123c-b4eg-edd2f0a9c128',
-            'project_cos': 'dev'}
-
-        # stub out the nova client so each discovery run sees exactly one
-        # fake instance without talking to a real compute endpoint
-        self.client = mock.MagicMock()
-        self.client.instance_get_all_by_host.return_value = [self.instance]
-        patch_client = mockpatch.Patch('ceilometer.nova_client.Client',
-                                       return_value=self.client)
-        self.useFixture(patch_client)
-
-        self.utc_now = mock.MagicMock(
-            return_value=datetime.datetime(2016, 1, 1,
-                                           tzinfo=iso8601.iso8601.UTC))
-        patch_timeutils = mockpatch.Patch('oslo_utils.timeutils.utcnow',
-                                          self.utc_now)
-        self.useFixture(patch_timeutils)
-
-        self.CONF = self.useFixture(fixture_config.Config()).conf
-        self.CONF.set_override('host', 'test')
-
-    def test_normal_discovery(self):
-        dsc = discovery.InstanceDiscovery()
-        resources = dsc.discover(mock.MagicMock())
-
-        self.assertEqual(1, len(resources))
-        self.assertEqual(1, list(resources)[0].id)
-
-        self.client.instance_get_all_by_host.assert_called_once_with(
-            'test', None)
-
-        resources = dsc.discover(mock.MagicMock())
-        self.assertEqual(1, len(resources))
-        self.assertEqual(1, list(resources)[0].id)
-        self.client.instance_get_all_by_host.assert_called_with(
-            self.CONF.host, "2016-01-01T00:00:00+00:00")
-
-    def test_discovery_with_resource_update_interval(self):
-        self.CONF.set_override("resource_update_interval", 600,
-                               group="compute")
-        dsc = discovery.InstanceDiscovery()
-        dsc.last_run = datetime.datetime(2016, 1, 1,
-                                         tzinfo=iso8601.iso8601.UTC)
-
-        self.utc_now.return_value = datetime.datetime(
-            2016, 1, 1, minute=5, tzinfo=iso8601.iso8601.UTC)
-        resources = dsc.discover(mock.MagicMock())
-        self.assertEqual(0, len(resources))
-        self.client.instance_get_all_by_host.assert_not_called()
-
-        self.utc_now.return_value = datetime.datetime(
-            2016, 1, 1, minute=20, tzinfo=iso8601.iso8601.UTC)
-        resources = dsc.discover(mock.MagicMock())
-        self.assertEqual(1, len(resources))
-        self.assertEqual(1, list(resources)[0].id)
-        self.client.instance_get_all_by_host.assert_called_once_with(
-            self.CONF.host, "2016-01-01T00:00:00+00:00")
diff --git a/ceilometer/tests/unit/compute/virt/__init__.py b/ceilometer/tests/unit/compute/virt/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/tests/unit/compute/virt/hyperv/__init__.py b/ceilometer/tests/unit/compute/virt/hyperv/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py b/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py
deleted file mode 100644
index 7df4f2e4..00000000
---
a/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2013 Cloudbase Solutions Srl -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for Hyper-V inspector. -""" - -import sys - -import mock -from os_win import exceptions as os_win_exc -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt.hyperv import inspector as hyperv_inspector -from ceilometer.compute.virt import inspector as virt_inspector - - -class TestHyperVInspection(base.BaseTestCase): - - @mock.patch.object(hyperv_inspector, 'utilsfactory', mock.MagicMock()) - @mock.patch.object(hyperv_inspector.HyperVInspector, - '_compute_host_max_cpu_clock') - def setUp(self, mock_compute_host_cpu_clock): - self._inspector = hyperv_inspector.HyperVInspector() - self._inspector._utils = mock.MagicMock() - - super(TestHyperVInspection, self).setUp() - - def test_converted_exception(self): - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.OSWinException) - self.assertRaises(virt_inspector.InspectorException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.HyperVException) - self.assertRaises(virt_inspector.InspectorException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - self._inspector._utils.get_cpu_metrics.side_effect = ( - os_win_exc.NotFound(resource='foofoo')) - self.assertRaises(virt_inspector.InstanceNotFoundException, - self._inspector.inspect_cpus, mock.sentinel.instance) - - def test_assert_original_traceback_maintained(self): - def bar(self): - foo = "foofoo" - raise os_win_exc.NotFound(resource=foo) - - self._inspector._utils.get_cpu_metrics.side_effect = bar - try: - self._inspector.inspect_cpus(mock.sentinel.instance) - self.fail("Test expected exception, but it was not raised.") - except virt_inspector.InstanceNotFoundException: - # exception has been raised as expected. - _, _, trace = sys.exc_info() - while trace.tb_next: - # iterate until the original exception source, bar. - trace = trace.tb_next - - # original frame will contain the 'foo' variable. 
- self.assertEqual('foofoo', trace.tb_frame.f_locals['foo']) - - @mock.patch.object(hyperv_inspector, 'utilsfactory') - def test_compute_host_max_cpu_clock(self, mock_utilsfactory): - mock_cpu = {'MaxClockSpeed': 1000} - hostutils = mock_utilsfactory.get_hostutils.return_value.get_cpus_info - hostutils.return_value = [mock_cpu, mock_cpu] - - cpu_clock = self._inspector._compute_host_max_cpu_clock() - self.assertEqual(2000.0, cpu_clock) - - def test_inspect_cpus(self): - fake_instance_name = 'fake_instance_name' - fake_cpu_clock_used = 2000 - fake_cpu_count = 3000 - fake_uptime = 4000 - - self._inspector._host_max_cpu_clock = 4000.0 - fake_cpu_percent_used = (fake_cpu_clock_used / - self._inspector._host_max_cpu_clock) - fake_cpu_time = (int(fake_uptime * fake_cpu_percent_used) * - 1000) - self._inspector._utils.get_cpu_metrics.return_value = ( - fake_cpu_clock_used, fake_cpu_count, fake_uptime) - - cpu_stats = self._inspector.inspect_cpus(fake_instance_name) - - self.assertEqual(fake_cpu_count, cpu_stats.number) - self.assertEqual(fake_cpu_time, cpu_stats.time) - - def test_inspect_memory_usage(self): - fake_usage = self._inspector._utils.get_memory_metrics.return_value - usage = self._inspector.inspect_memory_usage( - mock.sentinel.FAKE_INSTANCE, mock.sentinel.FAKE_DURATION) - self.assertEqual(fake_usage, usage.usage) - - def test_inspect_vnics(self): - fake_instance_name = 'fake_instance_name' - fake_rx_mb = 1000 - fake_tx_mb = 2000 - fake_element_name = 'fake_element_name' - fake_address = 'fake_address' - - self._inspector._utils.get_vnic_metrics.return_value = [{ - 'rx_mb': fake_rx_mb, - 'tx_mb': fake_tx_mb, - 'element_name': fake_element_name, - 'address': fake_address}] - - inspected_vnics = list(self._inspector.inspect_vnics( - fake_instance_name)) - - self.assertEqual(1, len(inspected_vnics)) - self.assertEqual(2, len(inspected_vnics[0])) - - inspected_vnic, inspected_stats = inspected_vnics[0] - - self.assertEqual(fake_element_name, inspected_vnic.name) - self.assertEqual(fake_address, inspected_vnic.mac) - - self.assertEqual(fake_rx_mb * units.Mi, inspected_stats.rx_bytes) - self.assertEqual(fake_tx_mb * units.Mi, inspected_stats.tx_bytes) - - def test_inspect_disks(self): - fake_instance_name = 'fake_instance_name' - fake_read_mb = 1000 - fake_write_mb = 2000 - fake_instance_id = "fake_fake_instance_id" - fake_host_resource = "fake_host_resource" - - self._inspector._utils.get_disk_metrics.return_value = [{ - 'read_mb': fake_read_mb, - 'write_mb': fake_write_mb, - 'instance_id': fake_instance_id, - 'host_resource': fake_host_resource}] - - inspected_disks = list(self._inspector.inspect_disks( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - - self.assertEqual(fake_read_mb * units.Mi, inspected_stats.read_bytes) - self.assertEqual(fake_write_mb * units.Mi, inspected_stats.write_bytes) - - def test_inspect_disk_latency(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_latency = mock.sentinel.DISK_LATENCY - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_latency_metrics.return_value = [{ - 'disk_latency': fake_disk_latency, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_latency( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - 
inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_latency, inspected_stats.disk_latency) - - def test_inspect_disk_iops_count(self): - fake_instance_name = mock.sentinel.INSTANCE_NAME - fake_disk_iops_count = mock.sentinel.DISK_IOPS_COUNT - fake_instance_id = mock.sentinel.INSTANCE_ID - - self._inspector._utils.get_disk_iops_count.return_value = [{ - 'iops_count': fake_disk_iops_count, - 'instance_id': fake_instance_id}] - - inspected_disks = list(self._inspector.inspect_disk_iops( - fake_instance_name)) - - self.assertEqual(1, len(inspected_disks)) - self.assertEqual(2, len(inspected_disks[0])) - - inspected_disk, inspected_stats = inspected_disks[0] - - self.assertEqual(fake_instance_id, inspected_disk.device) - self.assertEqual(fake_disk_iops_count, inspected_stats.iops_count) diff --git a/ceilometer/tests/unit/compute/virt/libvirt/__init__.py b/ceilometer/tests/unit/compute/virt/libvirt/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py b/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py deleted file mode 100644 index 16220cfa..00000000 --- a/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2012 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for libvirt inspector. 
-""" - -try: - import contextlib2 as contextlib # for Python < 3.3 -except ImportError: - import contextlib - -import fixtures -import mock -from oslo_utils import units -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.libvirt import inspector as libvirt_inspector - - -class TestLibvirtInspection(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspection, self).setUp() - - class VMInstance(object): - id = 'ff58e738-12f4-4c58-acde-77617b68da56' - name = 'instance-00000001' - self.instance = VMInstance - self.inspector = libvirt_inspector.LibvirtInspector() - self.inspector.connection = mock.Mock() - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.VIR_DOMAIN_SHUTOFF = 5 - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - self.domain = mock.Mock() - self.addCleanup(mock.patch.stopall) - - def test_inspect_cpus(self): - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - cpu_info = self.inspector.inspect_cpus(self.instance) - self.assertEqual(2, cpu_info.number) - self.assertEqual(999999, cpu_info.time) - - def test_inspect_cpus_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_cpus, - self.instance) - - def test_inspect_vnics(self): - dom_xml = """ - - - - - - - -
- - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - """ - - interface_stats = { - 'vnet0': (1, 2, 0, 0, 3, 4, 0, 0), - 'vnet1': (5, 6, 0, 0, 7, 8, 0, 0), - 'vnet2': (9, 10, 0, 0, 11, 12, 0, 0), - } - interfaceStats = interface_stats.__getitem__ - - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, - 'interfaceStats', - side_effect=interfaceStats)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - interfaces = list(self.inspector.inspect_vnics(self.instance)) - - self.assertEqual(3, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vnet0', vnic0.name) - self.assertEqual('fa:16:3e:71:ec:6d', vnic0.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6d', vnic0.fref) - self.assertEqual('255.255.255.0', vnic0.parameters.get('projmask')) - self.assertEqual('10.0.0.2', vnic0.parameters.get('ip')) - self.assertEqual('10.0.0.0', vnic0.parameters.get('projnet')) - self.assertEqual('10.0.0.1', vnic0.parameters.get('dhcpserver')) - self.assertEqual(1, info0.rx_bytes) - self.assertEqual(2, info0.rx_packets) - self.assertEqual(3, info0.tx_bytes) - self.assertEqual(4, info0.tx_packets) - - vnic1, info1 = interfaces[1] - self.assertEqual('vnet1', vnic1.name) - self.assertEqual('fa:16:3e:71:ec:6e', vnic1.mac) - self.assertEqual('nova-instance-00000001-fa163e71ec6e', vnic1.fref) - self.assertEqual('255.255.255.0', vnic1.parameters.get('projmask')) - self.assertEqual('192.168.0.2', vnic1.parameters.get('ip')) - self.assertEqual('192.168.0.0', vnic1.parameters.get('projnet')) - self.assertEqual('192.168.0.1', vnic1.parameters.get('dhcpserver')) - self.assertEqual(5, info1.rx_bytes) - self.assertEqual(6, info1.rx_packets) - self.assertEqual(7, info1.tx_bytes) - self.assertEqual(8, info1.tx_packets) - - vnic2, info2 = interfaces[2] - self.assertEqual('vnet2', vnic2.name) - self.assertEqual('fa:16:3e:96:33:f0', vnic2.mac) - self.assertIsNone(vnic2.fref) - self.assertEqual(dict(), vnic2.parameters) - self.assertEqual(9, info2.rx_bytes) - self.assertEqual(10, info2.rx_packets) - self.assertEqual(11, info2.tx_bytes) - self.assertEqual(12, info2.tx_packets) - - def test_inspect_vnics_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_vnics - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_disks(self): - dom_xml = """ - - - - - - - -
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockStats', - return_value=(1, 2, 3, - 4, -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disks(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.read_requests) - self.assertEqual(2, info0.read_bytes) - self.assertEqual(3, info0.write_requests) - self.assertEqual(4, info0.write_bytes) - - def test_inspect_disks_with_domain_shutoff(self): - connection = self.inspector.connection - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999))) - inspect = self.inspector.inspect_disks - self.assertRaises(virt_inspector.InstanceShutOffException, - list, inspect(self.instance)) - - def test_inspect_memory_usage(self): - fake_memory_stats = {'available': 51200, 'unused': 25600} - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value=fake_memory_stats): - memory = self.inspector.inspect_memory_usage( - self.instance) - self.assertEqual(25600 / units.Ki, memory.usage) - - def test_inspect_disk_info(self): - dom_xml = """ - - - - - - - -
- - - - """ - - with contextlib.ExitStack() as stack: - stack.enter_context(mock.patch.object(self.inspector.connection, - 'lookupByUUIDString', - return_value=self.domain)) - stack.enter_context(mock.patch.object(self.domain, 'XMLDesc', - return_value=dom_xml)) - stack.enter_context(mock.patch.object(self.domain, 'blockInfo', - return_value=(1, 2, 3, - -1))) - stack.enter_context(mock.patch.object(self.domain, 'info', - return_value=(0, 0, 0, - 2, 999999))) - disks = list(self.inspector.inspect_disk_info(self.instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('vda', disk0.device) - self.assertEqual(1, info0.capacity) - self.assertEqual(2, info0.allocation) - self.assertEqual(3, info0.physical) - - def test_inspect_memory_usage_with_domain_shutoff(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(5, 0, 0, - 2, 999999)): - self.assertRaises(virt_inspector.InstanceShutOffException, - self.inspector.inspect_memory_usage, - self.instance) - - def test_inspect_memory_usage_with_empty_stats(self): - connection = self.inspector.connection - with mock.patch.object(connection, 'lookupByUUIDString', - return_value=self.domain): - with mock.patch.object(self.domain, 'info', - return_value=(0, 0, 51200, - 2, 999999)): - with mock.patch.object(self.domain, 'memoryStats', - return_value={}): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.inspect_memory_usage, - self.instance) - - -class TestLibvirtInspectionWithError(base.BaseTestCase): - - class fakeLibvirtError(Exception): - pass - - def setUp(self): - super(TestLibvirtInspectionWithError, self).setUp() - self.inspector = libvirt_inspector.LibvirtInspector() - self.useFixture(fixtures.MonkeyPatch( - 'ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - self._dummy_get_connection)) - libvirt_inspector.libvirt = mock.Mock() - libvirt_inspector.libvirt.libvirtError = self.fakeLibvirtError - - @staticmethod - def _dummy_get_connection(*args, **kwargs): - raise Exception('dummy') - - def test_inspect_unknown_error(self): - self.assertRaises(virt_inspector.InspectorException, - self.inspector.inspect_cpus, 'foo') - - -class TestLibvirtInitWithError(base.BaseTestCase): - - def setUp(self): - super(TestLibvirtInitWithError, self).setUp() - self.inspector = libvirt_inspector.LibvirtInspector() - libvirt_inspector.libvirt = mock.Mock() - - @mock.patch('ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - mock.Mock(return_value=None)) - def test_init_error(self): - self.assertRaises(virt_inspector.NoSanityException, - self.inspector.check_sanity) - - @mock.patch('ceilometer.compute.virt.libvirt.inspector.' - 'LibvirtInspector._get_connection', - mock.Mock(side_effect=virt_inspector.NoDataException)) - def test_init_exception(self): - self.assertRaises(virt_inspector.NoDataException, - self.inspector.check_sanity) diff --git a/ceilometer/tests/unit/compute/virt/vmware/__init__.py b/ceilometer/tests/unit/compute/virt/vmware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py b/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py deleted file mode 100644 index a6ca74a6..00000000 --- a/ceilometer/tests/unit/compute/virt/vmware/test_inspector.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests for VMware vSphere inspector. -""" - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.vmware import inspector as vsphere_inspector - - -class TestVsphereInspection(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False, port=7443) - vsphere_inspector.get_api_session = mock.Mock( - return_value=api_session) - self._inspector = vsphere_inspector.VsphereInspector() - self._inspector._ops = mock.MagicMock() - - super(TestVsphereInspection, self).setUp() - - def test_inspect_memory_usage(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_memory_value = 1024.0 - fake_stat = virt_inspector.MemoryUsageStats(usage=1.0) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops. - get_perf_counter_id.return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. - return_value) = fake_memory_value - memory_stat = self._inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_cpu_util(self): - fake_instance_moid = 'fake_instance_moid' - fake_instance_id = 'fake_instance_id' - fake_perf_counter_id = 'fake_perf_counter_id' - fake_cpu_util_value = 60 - fake_stat = virt_inspector.CPUUtilStats(util=60) - - def construct_mock_instance_object(fake_instance_id): - instance_object = mock.MagicMock() - instance_object.id = fake_instance_id - return instance_object - - fake_instance = construct_mock_instance_object(fake_instance_id) - self._inspector._ops.get_vm_moid.return_value = fake_instance_moid - (self._inspector._ops.get_perf_counter_id. - return_value) = fake_perf_counter_id - (self._inspector._ops.query_vm_aggregate_stats. 
- return_value) = fake_cpu_util_value * 100 - cpu_util_stat = self._inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_vnic_rates(self): - - # construct test data - test_vm_moid = "vm-21" - vnic1 = "vnic-1" - vnic2 = "vnic-2" - counter_name_to_id_map = { - vsphere_inspector.VC_NETWORK_RX_COUNTER: 1, - vsphere_inspector.VC_NETWORK_TX_COUNTER: 2 - } - counter_id_to_stats_map = { - 1: {vnic1: 1, vnic2: 3}, - 2: {vnic1: 2, vnic2: 4}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - result = self._inspector.inspect_vnic_rates(mock.MagicMock()) - - # validate result - expected_stats = { - vnic1: virt_inspector.InterfaceRateStats(1024, 2048), - vnic2: virt_inspector.InterfaceRateStats(3072, 4096) - } - - for vnic, rates_info in result: - self.assertEqual(expected_stats[vnic.name], rates_info) - - def test_inspect_disk_rates(self): - - # construct test data - test_vm_moid = "vm-21" - disk1 = "disk-1" - disk2 = "disk-2" - counter_name_to_id_map = { - vsphere_inspector.VC_DISK_READ_RATE_CNTR: 1, - vsphere_inspector.VC_DISK_READ_REQUESTS_RATE_CNTR: 2, - vsphere_inspector.VC_DISK_WRITE_RATE_CNTR: 3, - vsphere_inspector.VC_DISK_WRITE_REQUESTS_RATE_CNTR: 4 - } - counter_id_to_stats_map = { - 1: {disk1: 1, disk2: 2}, - 2: {disk1: 300, disk2: 400}, - 3: {disk1: 5, disk2: 6}, - 4: {disk1: 700}, - } - - def get_counter_id_side_effect(counter_full_name): - return counter_name_to_id_map[counter_full_name] - - def query_stat_side_effect(vm_moid, counter_id, duration): - # assert inputs - self.assertEqual(test_vm_moid, vm_moid) - self.assertIn(counter_id, counter_id_to_stats_map) - return counter_id_to_stats_map[counter_id] - - # configure vsphere operations mock with the test data - ops_mock = self._inspector._ops - ops_mock.get_vm_moid.return_value = test_vm_moid - ops_mock.get_perf_counter_id.side_effect = get_counter_id_side_effect - ops_mock.query_vm_device_stats.side_effect = query_stat_side_effect - - result = self._inspector.inspect_disk_rates(mock.MagicMock()) - - # validate result - expected_stats = { - disk1: virt_inspector.DiskRateStats(1024, 300, 5120, 700), - disk2: virt_inspector.DiskRateStats(2048, 400, 6144, 0) - } - - actual_stats = dict((disk.device, rates) for (disk, rates) in result) - self.assertEqual(expected_stats, actual_stats) diff --git a/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py b/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py deleted file mode 100644 index 1d7ba148..00000000 --- a/ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) 2014 VMware, Inc. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_vmware import api -from oslotest import base - -from ceilometer.compute.virt.vmware import vsphere_operations - - -class VsphereOperationsTest(base.BaseTestCase): - - def setUp(self): - api_session = api.VMwareAPISession("test_server", "test_user", - "test_password", 0, None, - create_session=False) - api_session._vim = mock.MagicMock() - self._vsphere_ops = vsphere_operations.VsphereOperations(api_session, - 1000) - super(VsphereOperationsTest, self).setUp() - - def test_get_vm_moid(self): - - vm1_moid = "vm-1" - vm2_moid = "vm-2" - vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85" - vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693" - - def construct_mock_vm_object(vm_moid, vm_instance): - vm_object = mock.MagicMock() - vm_object.obj.value = vm_moid - vm_object.propSet[0].val = vm_instance - return vm_object - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(self._vsphere_ops._max_objects, - options.maxObjects) - self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - vm1 = construct_mock_vm_object(vm1_moid, vm1_instance) - vm2 = construct_mock_vm_object(vm2_moid, vm2_instance) - result = mock.MagicMock() - result.objects.__iter__.return_value = [vm1, vm2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - vim_mock.ContinueRetrievePropertiesEx.return_value = None - - vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance) - self.assertEqual(vm1_moid, vm_moid) - - vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance) - self.assertEqual(vm2_moid, vm_moid) - - def test_query_vm_property(self): - - vm_moid = "vm-21" - vm_property_name = "runtime.powerState" - vm_property_val = "poweredON" - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vm_moid, specSet[0].obj.value) - self.assertEqual(vm_property_name, specSet[0].pathSet[0]) - - # mock return result - result = mock.MagicMock() - result.objects[0].propSet[0].val = vm_property_val - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - actual_val = self._vsphere_ops.query_vm_property(vm_moid, - vm_property_name) - self.assertEqual(vm_property_val, actual_val) - - def test_get_perf_counter_id(self): - - def construct_mock_counter_info(group_name, counter_name, rollup_type, - counter_id): - counter_info = mock.MagicMock() - counter_info.groupInfo.key = group_name - counter_info.nameInfo.key = counter_name - counter_info.rollupType = rollup_type - counter_info.key = counter_id - return counter_info - - def retrieve_props_side_effect(pc, specSet, options): - # assert inputs - self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY, - specSet[0].pathSet[0]) - - # mock return result - counter_info1 = construct_mock_counter_info("a", "b", "c", 1) - counter_info2 = construct_mock_counter_info("x", "y", "z", 2) - result = mock.MagicMock() - 
(result.objects[0].propSet[0].val.PerfCounterInfo.__iter__. - return_value) = [counter_info1, counter_info2] - return result - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect - - counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c") - self.assertEqual(1, counter_id) - - counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z") - self.assertEqual(2, counter_id) - - def test_query_vm_stats(self): - - vm_moid = "vm-21" - device1 = "device-1" - device2 = "device-2" - device3 = "device-3" - counter_id = 5 - - def construct_mock_metric_series(device_name, stat_values): - metric_series = mock.MagicMock() - metric_series.value = stat_values - metric_series.id.instance = device_name - return metric_series - - def vim_query_perf_side_effect(perf_manager, querySpec): - # assert inputs - self.assertEqual(vm_moid, querySpec[0].entity.value) - self.assertEqual(counter_id, querySpec[0].metricId[0].counterId) - self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL, - querySpec[0].intervalId) - - # mock return result - perf_stats = mock.MagicMock() - perf_stats[0].sampleInfo = ["s1", "s2", "s3"] - perf_stats[0].value.__iter__.return_value = [ - construct_mock_metric_series(None, [111, 222, 333]), - construct_mock_metric_series(device1, [100, 200, 300]), - construct_mock_metric_series(device2, [10, 20, 30]), - construct_mock_metric_series(device3, [1, 2, 3]) - ] - return perf_stats - - vim_mock = self._vsphere_ops._api_session._vim - vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect - ops = self._vsphere_ops - - # test aggregate stat - stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60) - self.assertEqual(222, stat_val) - - # test per-device(non-aggregate) stats - expected_device_stats = { - device1: 200, - device2: 20, - device3: 2 - } - stats = ops.query_vm_device_stats(vm_moid, counter_id, 60) - self.assertEqual(expected_device_stats, stats) diff --git a/ceilometer/tests/unit/compute/virt/xenapi/__init__.py b/ceilometer/tests/unit/compute/virt/xenapi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py b/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py deleted file mode 100644 index c5d5390f..00000000 --- a/ceilometer/tests/unit/compute/virt/xenapi/test_inspector.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for xenapi inspector. 
-""" - -import mock -from oslotest import base - -from ceilometer.compute.virt import inspector as virt_inspector -from ceilometer.compute.virt.xenapi import inspector as xenapi_inspector - - -class TestSwapXapiHost(base.BaseTestCase): - - def test_swapping(self): - self.assertEqual( - "http://otherserver:8765/somepath", - xenapi_inspector.swap_xapi_host( - "http://someserver:8765/somepath", 'otherserver')) - - def test_no_port(self): - self.assertEqual( - "http://otherserver/somepath", - xenapi_inspector.swap_xapi_host( - "http://someserver/somepath", 'otherserver')) - - def test_no_path(self): - self.assertEqual( - "http://otherserver", - xenapi_inspector.swap_xapi_host( - "http://someserver", 'otherserver')) - - def test_same_hostname_path(self): - self.assertEqual( - "http://other:80/some", - xenapi_inspector.swap_xapi_host( - "http://some:80/some", 'other')) - - -class TestXenapiInspection(base.BaseTestCase): - - def setUp(self): - api_session = mock.Mock() - xenapi_inspector.get_api_session = mock.Mock(return_value=api_session) - self.inspector = xenapi_inspector.XenapiInspector() - - super(TestXenapiInspection, self).setUp() - - def test_inspect_cpu_util(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.CPUUtilStats(util=40) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '536870912', - 'VCPUs_number': '1', - 'VCPUs_utilisation': {'0': 0.4, } - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - cpu_util_stat = self.inspector.inspect_cpu_util(fake_instance) - self.assertEqual(fake_stat, cpu_util_stat) - - def test_inspect_memory_usage(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - fake_stat = virt_inspector.MemoryUsageStats(usage=128) - - def fake_xenapi_request(method, args): - metrics_rec = { - 'memory_actual': '134217728', - } - - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_metrics': - return 'metrics_ref' - elif method == 'VM_metrics.get_record': - return metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - memory_stat = self.inspector.inspect_memory_usage(fake_instance) - self.assertEqual(fake_stat, memory_stat) - - def test_inspect_vnic_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vif_rec = { - 'metrics': 'vif_metrics_ref', - 'uuid': 'vif_uuid', - 'MAC': 'vif_mac', - } - - vif_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2', - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VIFs': - return ['vif_ref'] - elif method == 'VIF.get_record': - return vif_rec - elif method == 'VIF.get_metrics': - return 'vif_metrics_ref' - elif method == 'VIF_metrics.get_record': - return vif_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - interfaces = list(self.inspector.inspect_vnic_rates(fake_instance)) - - 
self.assertEqual(1, len(interfaces)) - vnic0, info0 = interfaces[0] - self.assertEqual('vif_uuid', vnic0.name) - self.assertEqual('vif_mac', vnic0.mac) - self.assertEqual(1024, info0.rx_bytes_rate) - self.assertEqual(2048, info0.tx_bytes_rate) - - def test_inspect_disk_rates(self): - fake_instance = {'OS-EXT-SRV-ATTR:instance_name': 'fake_instance_name', - 'id': 'fake_instance_id'} - - def fake_xenapi_request(method, args): - vbd_rec = { - 'device': 'xvdd' - } - - vbd_metrics_rec = { - 'io_read_kbs': '1', - 'io_write_kbs': '2' - } - if method == 'VM.get_by_name_label': - return ['vm_ref'] - elif method == 'VM.get_VBDs': - return ['vbd_ref'] - elif method == 'VBD.get_record': - return vbd_rec - elif method == 'VBD.get_metrics': - return 'vbd_metrics_ref' - elif method == 'VBD_metrics.get_record': - return vbd_metrics_rec - else: - return None - - session = self.inspector.session - with mock.patch.object(session, 'xenapi_request', - side_effect=fake_xenapi_request): - disks = list(self.inspector.inspect_disk_rates(fake_instance)) - - self.assertEqual(1, len(disks)) - disk0, info0 = disks[0] - self.assertEqual('xvdd', disk0.device) - self.assertEqual(1024, info0.read_bytes_rate) - self.assertEqual(2048, info0.write_bytes_rate) diff --git a/ceilometer/tests/unit/dispatcher/test_db.py b/ceilometer/tests/unit/dispatcher/test_db.py index a10c4c3e..4fa1f689 100644 --- a/ceilometer/tests/unit/dispatcher/test_db.py +++ b/ceilometer/tests/unit/dispatcher/test_db.py @@ -21,7 +21,6 @@ from oslotest import base from ceilometer.dispatcher import database from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils class TestDispatcherDB(base.BaseTestCase): @@ -36,84 +35,8 @@ class TestDispatcherDB(base.BaseTestCase): def test_event_conn(self): event = event_models.Event(uuid.uuid4(), 'test', datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) + [], {}).serialize() with mock.patch.object(self.dispatcher.event_conn, 'record_events') as record_events: self.dispatcher.record_events(event) self.assertEqual(1, len(record_events.call_args_list[0][0][0])) - - def test_valid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(msg) - - def test_invalid_message(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'message_signature': 'invalid-signature'} - - class ErrorConnection(object): - - called = False - - def record_metering_data(self, data): - self.called = True - - self.dispatcher._meter_conn = ErrorConnection() - - self.dispatcher.verify_and_record_metering_data(msg) - - if self.dispatcher.meter_conn.called: - self.fail('Should not have called the storage connection') - - def test_timestamp_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-07-02T13:53:40Z', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40) - - with mock.patch.object(self.dispatcher.meter_conn, - 
'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) - - def test_timestamp_tzinfo_conversion(self): - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - 'timestamp': '2012-09-30T15:31:50.262-08:00', - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - expected = msg.copy() - expected['timestamp'] = datetime.datetime(2012, 9, 30, 23, - 31, 50, 262000) - - with mock.patch.object(self.dispatcher.meter_conn, - 'record_metering_data') as record_metering_data: - self.dispatcher.verify_and_record_metering_data(msg) - - record_metering_data.assert_called_once_with(expected) diff --git a/ceilometer/tests/unit/dispatcher/test_dispatcher.py b/ceilometer/tests/unit/dispatcher/test_dispatcher.py deleted file mode 100644 index 780c3128..00000000 --- a/ceilometer/tests/unit/dispatcher/test_dispatcher.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture -from oslotest import mockpatch - -from ceilometer import dispatcher -from ceilometer.tests import base - - -class FakeDispatcherSample(dispatcher.MeterDispatcherBase): - def record_metering_data(self, data): - pass - - -class FakeDispatcher(dispatcher.MeterDispatcherBase, - dispatcher.EventDispatcherBase): - def record_metering_data(self, data): - pass - - def record_events(self, events): - pass - - -class TestDispatchManager(base.BaseTestCase): - def setUp(self): - super(TestDispatchManager, self).setUp() - self.conf = self.useFixture(fixture.Config()) - self.conf.config(meter_dispatchers=['database', 'gnocchi'], - event_dispatchers=['database']) - self.useFixture(mockpatch.Patch( - 'ceilometer.dispatcher.gnocchi.GnocchiDispatcher', - new=FakeDispatcherSample)) - self.useFixture(mockpatch.Patch( - 'ceilometer.dispatcher.database.DatabaseDispatcher', - new=FakeDispatcher)) - - def test_load(self): - sample_mg, event_mg = dispatcher.load_dispatcher_manager() - self.assertEqual(2, len(list(sample_mg))) - self.assertEqual(1, len(list(event_mg))) diff --git a/ceilometer/tests/unit/dispatcher/test_file.py b/ceilometer/tests/unit/dispatcher/test_file.py deleted file mode 100644 index ab54e42b..00000000 --- a/ceilometer/tests/unit/dispatcher/test_file.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import logging.handlers -import os -import tempfile - -from oslo_config import fixture as fixture_config -from oslotest import base - -from ceilometer.dispatcher import file -from ceilometer.publisher import utils - - -class TestDispatcherFile(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherFile, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_file_dispatcher_with_all_config(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 50 - self.CONF.dispatcher_file.backup_count = 5 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist - # and not produce errors. - dispatcher.verify_and_record_metering_data(msg) - # After the method call above, the file should have been created. - self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_path_only(self): - # Create a temporaryFile to get a file name - tf = tempfile.NamedTemporaryFile('r') - filename = tf.name - tf.close() - - self.CONF.dispatcher_file.file_path = filename - self.CONF.dispatcher_file.max_bytes = 0 - self.CONF.dispatcher_file.backup_count = 0 - dispatcher = file.FileDispatcher(self.CONF) - - # The number of the handlers should be 1 - self.assertEqual(1, len(dispatcher.log.handlers)) - # The handler should be RotatingFileHandler - handler = dispatcher.log.handlers[0] - self.assertIsInstance(handler, - logging.FileHandler) - - msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - msg['message_signature'] = utils.compute_signature( - msg, self.CONF.publisher.telemetry_secret, - ) - - # The record_metering_data method should exist and not produce errors. - dispatcher.verify_and_record_metering_data(msg) - # After the method call above, the file should have been created. - self.assertTrue(os.path.exists(handler.baseFilename)) - - def test_file_dispatcher_with_no_path(self): - self.CONF.dispatcher_file.file_path = None - dispatcher = file.FileDispatcher(self.CONF) - - # The log should be None - self.assertIsNone(dispatcher.log) diff --git a/ceilometer/tests/unit/dispatcher/test_gnocchi.py b/ceilometer/tests/unit/dispatcher/test_gnocchi.py deleted file mode 100644 index 9039b8d2..00000000 --- a/ceilometer/tests/unit/dispatcher/test_gnocchi.py +++ /dev/null @@ -1,445 +0,0 @@ -# -# Copyright 2014 eNovance -# -# Authors: Mehdi Abaakouk -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import os -import uuid - -from gnocchiclient import exceptions as gnocchi_exc -from gnocchiclient import utils as gnocchi_utils -from keystoneauth1 import exceptions as ka_exceptions -import mock -from oslo_config import fixture as config_fixture -from oslo_utils import fileutils -from oslotest import mockpatch -import requests -import six -import testscenarios - -from ceilometer.dispatcher import gnocchi -from ceilometer.publisher import utils -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - -load_tests = testscenarios.load_tests_apply_scenarios - - -@mock.patch('gnocchiclient.v1.client.Client', mock.Mock()) -class DispatcherTest(base.BaseTestCase): - - def setUp(self): - super(DispatcherTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - self.resource_id = str(uuid.uuid4()) - self.samples = [{ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - { - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2014-05-08 20:23:48.028195', - 'resource_id': self.resource_id, - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }] - for sample in self.samples: - sample['message_signature'] = utils.compute_signature( - sample, self.conf.conf.publisher.telemetry_secret) - - ks_client = mock.Mock(auth_token='fake_token') - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - self.conf.conf.dispatcher_gnocchi.filter_service_activity = True - - def test_config_load(self): - self.conf.config(filter_service_activity=False, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - names = [rd.cfg['resource_type'] for rd in d.resources_definition] - self.assertIn('instance', names) - self.assertIn('volume', names) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_broken_config_load(self, mylog): - contents = [("---\n" - "resources:\n" - " - resource_type: foobar\n"), - ("---\n" - "resources:\n" - " - resource_type: 0\n"), - ("---\n" - "resources:\n" - " - sample_types: ['foo', 'bar']\n"), - ("---\n" - "resources:\n" - " - sample_types: foobar\n" - " - resource_type: foobar\n"), - ] - - for content in contents: - if six.PY3: - content = content.encode('utf-8') - - temp = fileutils.write_to_tempfile(content=content, - prefix='gnocchi_resources', - suffix='.yaml') - self.addCleanup(os.remove, temp) - self.conf.config(filter_service_activity=False, - resources_definition_file=temp, - group='dispatcher_gnocchi') - d = gnocchi.GnocchiDispatcher(self.conf.conf) - self.assertTrue(mylog.error.called) - self.assertEqual(0, 
len(d.resources_definition)) - - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '._if_not_cached') - @mock.patch('ceilometer.dispatcher.gnocchi.GnocchiDispatcher' - '.batch_measures') - def _do_test_activity_filter(self, expected_measures, fake_batch, __): - - d = gnocchi.GnocchiDispatcher(self.conf.conf) - d.verify_and_record_metering_data(self.samples) - fake_batch.assert_called_with( - mock.ANY, mock.ANY, - {'metrics': 1, 'resources': 1, 'measures': expected_measures}) - - def test_activity_filter_match_project_id(self): - self.samples[0]['project_id'] = ( - 'a2d42c23-d518-46b6-96ab-3fba2e146859') - self._do_test_activity_filter(1) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - def test_activity_gnocchi_project_not_found(self, logger): - self.ks_client.projects.find.side_effect = ka_exceptions.NotFound - self._do_test_activity_filter(2) - logger.warning.assert_called_with('gnocchi project not found in ' - 'keystone, ignoring the ' - 'filter_service_activity option') - - def test_activity_filter_match_swift_event(self): - self.samples[0]['counter_name'] = 'storage.api.request' - self.samples[0]['resource_id'] = 'a2d42c23-d518-46b6-96ab-3fba2e146859' - self._do_test_activity_filter(1) - - def test_activity_filter_nomatch(self): - self._do_test_activity_filter(2) - - -class MockResponse(mock.NonCallableMock): - def __init__(self, code): - text = {500: 'Internal Server Error', - 404: 'Not Found', - 204: 'Created', - 409: 'Conflict', - }.get(code) - super(MockResponse, self).__init__(spec=requests.Response, - status_code=code, - text=text) - - -class DispatcherWorkflowTest(base.BaseTestCase, - testscenarios.TestWithScenarios): - - sample_scenarios = [ - ('disk.root.size', dict( - sample={ - 'counter_name': 'disk.root.size', - 'counter_unit': 'GB', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'host': 'foo', - 'image_ref': 'imageref!', - 'instance_flavor_id': 1234, - 'display_name': 'myinstance', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - 'host': 'foo', - 'image_ref': 'imageref!', - 'flavor_id': 1234, - 'display_name': 'myinstance', - }, - metric_names=[ - 'instance', 'disk.root.size', 'disk.ephemeral.size', - 'memory', 'vcpus', 'memory.usage', 'memory.resident', - 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', - 'disk.read.requests.rate', 'disk.write.requests', - 'disk.write.requests.rate', 'disk.read.bytes', - 'disk.read.bytes.rate', 'disk.write.bytes', - 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', - 'disk.capacity', 'disk.allocation', 'disk.usage'], - resource_type='instance')), - ('hardware.ipmi.node.power', dict( - sample={ - 'counter_name': 'hardware.ipmi.node.power', - 'counter_unit': 'W', - 'counter_type': 'gauge', - 'counter_volume': '2', - 'user_id': 'test_user', - 'project_id': 'test_project', - 'source': 'openstack', - 'timestamp': '2012-05-08 20:23:48.028195', - 'resource_metadata': { - 'useless': 'not_used', - } - }, - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': '2' - }], - postable_attributes={ - 'user_id': 'test_user', - 'project_id': 'test_project', - }, - patchable_attributes={ - }, - metric_names=[ - 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', - 
'hardware.ipmi.node.inlet_temperature', - 'hardware.ipmi.node.outlet_temperature', - 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', - 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', - 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', - 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' - ], - resource_type='ipmi')), - ] - - default_workflow = dict(resource_exists=True, - metric_exists=True, - post_measure_fail=False, - create_resource_fail=False, - create_metric_fail=False, - update_resource_fail=False, - retry_post_measures_fail=False) - workflow_scenarios = [ - ('normal_workflow', {}), - ('new_resource', dict(resource_exists=False)), - ('new_resource_fail', dict(resource_exists=False, - create_resource_fail=True)), - ('resource_update_fail', dict(update_resource_fail=True)), - ('new_metric', dict(metric_exists=False)), - ('new_metric_fail', dict(metric_exists=False, - create_metric_fail=True)), - ('retry_fail', dict(resource_exists=False, - retry_post_measures_fail=True)), - ('measure_fail', dict(post_measure_fail=True)), - ] - - @classmethod - def generate_scenarios(cls): - workflow_scenarios = [] - for name, wf_change in cls.workflow_scenarios: - wf = cls.default_workflow.copy() - wf.update(wf_change) - workflow_scenarios.append((name, wf)) - cls.scenarios = testscenarios.multiply_scenarios(cls.sample_scenarios, - workflow_scenarios) - - def setUp(self): - super(DispatcherWorkflowTest, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - # Set this explicitly to avoid conflicts with any existing - # configuration. - self.conf.config(url='http://localhost:8041', - group='dispatcher_gnocchi') - ks_client = mock.Mock() - ks_client.projects.find.return_value = mock.Mock( - name='gnocchi', id='a2d42c23-d518-46b6-96ab-3fba2e146859') - self.useFixture(mockpatch.Patch( - 'ceilometer.keystone_client.get_client', - return_value=ks_client)) - self.ks_client = ks_client - - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.conf.config( - resources_definition_file=self.path_get( - 'etc/ceilometer/gnocchi_resources.yaml'), - group="dispatcher_gnocchi" - ) - - self.sample['resource_id'] = str(uuid.uuid4()) + "/foobar" - self.sample['message_signature'] = utils.compute_signature( - self.sample, self.conf.conf.publisher.telemetry_secret) - - @mock.patch('ceilometer.dispatcher.gnocchi.LOG') - @mock.patch('gnocchiclient.v1.client.Client') - def test_workflow(self, fakeclient_cls, logger): - self.dispatcher = gnocchi.GnocchiDispatcher(self.conf.conf) - - fakeclient = fakeclient_cls.return_value - - # FIXME(sileht): we don't use urlparse.quote here - # to ensure / is converted in %2F - # temporary disabled until we find a solution - # on gnocchi side. 
Current gnocchiclient doesn't - # encode the resource_id - resource_id = self.sample['resource_id'] # .replace("/", "%2F"), - metric_name = self.sample['counter_name'] - gnocchi_id = gnocchi_utils.encode_resource_id(resource_id) - - expected_calls = [ - mock.call.capabilities.list(), - mock.call.metric.batch_resources_metrics_measures( - {gnocchi_id: {metric_name: self.measures_attributes}}) - ] - expected_debug = [ - mock.call('gnocchi project found: %s', - 'a2d42c23-d518-46b6-96ab-3fba2e146859'), - ] - - measures_posted = False - batch_side_effect = [] - if self.post_measure_fail: - batch_side_effect += [Exception('boom!')] - elif not self.resource_exists or not self.metric_exists: - batch_side_effect += [ - gnocchi_exc.BadRequest( - 400, "Unknown metrics: %s/%s" % (gnocchi_id, - metric_name))] - attributes = self.postable_attributes.copy() - attributes.update(self.patchable_attributes) - attributes['id'] = self.sample['resource_id'] - attributes['metrics'] = dict((metric_name, {}) - for metric_name in self.metric_names) - for k, v in six.iteritems(attributes['metrics']): - if k == 'disk.root.size': - v['unit'] = 'GB' - continue - if k == 'hardware.ipmi.node.power': - v['unit'] = 'W' - continue - expected_calls.append(mock.call.resource.create( - self.resource_type, attributes)) - - if self.create_resource_fail: - fakeclient.resource.create.side_effect = [Exception('boom!')] - elif self.resource_exists: - fakeclient.resource.create.side_effect = [ - gnocchi_exc.ResourceAlreadyExists(409)] - - expected_calls.append(mock.call.metric.create({ - 'name': self.sample['counter_name'], - 'unit': self.sample['counter_unit'], - 'resource_id': resource_id})) - if self.create_metric_fail: - fakeclient.metric.create.side_effect = [Exception('boom!')] - elif self.metric_exists: - fakeclient.metric.create.side_effect = [ - gnocchi_exc.NamedMetricAreadyExists(409)] - else: - fakeclient.metric.create.side_effect = [None] - - else: # not resource_exists - expected_debug.append(mock.call( - 'Resource %s created', self.sample['resource_id'])) - - if not self.create_resource_fail and not self.create_metric_fail: - expected_calls.append( - mock.call.metric.batch_resources_metrics_measures( - {gnocchi_id: {metric_name: self.measures_attributes}}) - ) - - if self.retry_post_measures_fail: - batch_side_effect += [Exception('boom!')] - else: - measures_posted = True - - else: - measures_posted = True - - if measures_posted: - batch_side_effect += [None] - expected_debug.append( - mock.call("%(measures)d measures posted against %(metrics)d " - "metrics through %(resources)d resources", dict( - measures=len(self.measures_attributes), - metrics=1, resources=1)) - ) - - if self.patchable_attributes: - expected_calls.append(mock.call.resource.update( - self.resource_type, resource_id, - self.patchable_attributes)) - if self.update_resource_fail: - fakeclient.resource.update.side_effect = [Exception('boom!')] - else: - expected_debug.append(mock.call( - 'Resource %s updated', self.sample['resource_id'])) - - batch = fakeclient.metric.batch_resources_metrics_measures - batch.side_effect = batch_side_effect - - self.dispatcher.verify_and_record_metering_data([self.sample]) - - # Check that the last log message is the expected one - if (self.post_measure_fail or self.create_metric_fail - or self.create_resource_fail - or self.retry_post_measures_fail - or (self.update_resource_fail and self.patchable_attributes)): - logger.error.assert_called_with('boom!', exc_info=True) - else: - self.assertEqual(0, 
logger.error.call_count) - self.assertEqual(expected_calls, fakeclient.mock_calls) - self.assertEqual(expected_debug, logger.debug.mock_calls) - -DispatcherWorkflowTest.generate_scenarios() diff --git a/ceilometer/tests/unit/dispatcher/test_http.py b/ceilometer/tests/unit/dispatcher/test_http.py deleted file mode 100644 index 8e74f056..00000000 --- a/ceilometer/tests/unit/dispatcher/test_http.py +++ /dev/null @@ -1,121 +0,0 @@ -# -# Copyright 2013 IBM Corp -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -import requests - -from ceilometer.dispatcher import http -from ceilometer.event.storage import models as event_models -from ceilometer.publisher import utils - - -class TestDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.msg = {'counter_name': 'test', - 'resource_id': self.id(), - 'counter_volume': 1, - } - self.msg['message_signature'] = utils.compute_signature( - self.msg, self.CONF.publisher.telemetry_secret, - ) - - def test_http_dispatcher_config_options(self): - self.CONF.dispatcher_http.target = 'fake' - self.CONF.dispatcher_http.timeout = 2 - dispatcher = http.HttpDispatcher(self.CONF) - - self.assertEqual('fake', dispatcher.target) - self.assertEqual(2, dispatcher.timeout) - - def test_http_dispatcher_with_no_target(self): - self.CONF.dispatcher_http.target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - # The target should be None - self.assertEqual('', dispatcher.target) - - with mock.patch.object(requests, 'post') as post: - dispatcher.verify_and_record_metering_data(self.msg) - - # Since the target is not set, no http post should occur, thus the - # call_count should be zero. 
- self.assertEqual(0, post.call_count) - - def test_http_dispatcher_with_no_metadata(self): - self.CONF.dispatcher_http.target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - with mock.patch.object(requests, 'post') as post: - dispatcher.verify_and_record_metering_data(self.msg) - - self.assertEqual(1, post.call_count) - - -class TestEventDispatcherHttp(base.BaseTestCase): - - def setUp(self): - super(TestEventDispatcherHttp, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - - def test_http_dispatcher(self): - self.CONF.dispatcher_http.event_target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual(1, post.call_count) - - def test_http_dispatcher_bad(self): - self.CONF.dispatcher_http.event_target = '' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - with mock.patch('ceilometer.dispatcher.http.LOG', - mock.MagicMock()) as LOG: - dispatcher.record_events(event) - self.assertTrue(LOG.exception.called) - - def test_http_dispatcher_share_target(self): - self.CONF.dispatcher_http.target = 'fake' - dispatcher = http.HttpDispatcher(self.CONF) - - event = event_models.Event(uuid.uuid4(), 'test', - datetime.datetime(2012, 7, 2, 13, 53, 40), - [], {}) - event = utils.message_from_event(event, - self.CONF.publisher.telemetry_secret) - with mock.patch.object(requests, 'post') as post: - dispatcher.record_events(event) - - self.assertEqual('fake', post.call_args[0][0]) diff --git a/ceilometer/tests/unit/energy/__init__.py b/ceilometer/tests/unit/energy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/energy/test_kwapi.py b/ceilometer/tests/unit/energy/test_kwapi.py deleted file mode 100644 index eaf9dc0c..00000000 --- a/ceilometer/tests/unit/energy/test_kwapi.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from keystoneauth1 import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.energy import kwapi - - -PROBE_DICT = { - "probes": { - "A": { - "timestamp": 1357730232.68754, - "w": 107.3, - "kwh": 0.001058255421506034 - }, - "B": { - "timestamp": 1357730232.048158, - "w": 15.0, - "kwh": 0.029019045026169896 - }, - "C": { - "timestamp": 1357730232.223375, - "w": 95.0, - "kwh": 0.17361822634312918 - } - } -} - -ENDPOINT = 'end://point' - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.Mock() - - -class _BaseTestCase(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestCase, self).setUp() - self.manager = TestManager() - - -class TestKwapi(_BaseTestCase): - - @staticmethod - def fake_get_kwapi_client(ksclient, endpoint): - raise exceptions.EndpointNotFound("fake keystone exception") - - def test_endpoint_not_exist(self): - with mockpatch.PatchObject(kwapi._Base, 'get_kwapi_client', - side_effect=self.fake_get_kwapi_client): - pollster = kwapi.EnergyPollster() - samples = list(pollster.get_samples(self.manager, {}, - [ENDPOINT])) - - self.assertEqual(0, len(samples)) - - -class TestEnergyPollster(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - unit = 'kwh' - - def setUp(self): - super(TestEnergyPollster, self).setUp() - self.useFixture(mockpatch.PatchObject( - kwapi._Base, '_iter_probes', side_effect=self.fake_iter_probes)) - - @staticmethod - def fake_iter_probes(ksclient, cache, endpoint): - probes = PROBE_DICT['probes'] - for key, value in six.iteritems(probes): - probe_dict = value - probe_dict['id'] = key - yield probe_dict - - def test_default_discovery(self): - pollster = kwapi.EnergyPollster() - self.assertEqual('endpoint:energy', pollster.default_discovery) - - def test_sample(self): - cache = {} - samples = list(self.pollster_cls().get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(len(PROBE_DICT['probes']), len(samples)) - samples_by_name = dict((s.resource_id, s) for s in samples) - for name, probe in PROBE_DICT['probes'].items(): - sample = samples_by_name[name] - self.assertEqual(probe[self.unit], sample.volume) - - -class TestPowerPollster(TestEnergyPollster): - pollster_cls = kwapi.PowerPollster - unit = 'w' - - -class TestEnergyPollsterCache(_BaseTestCase): - pollster_cls = kwapi.EnergyPollster - - def test_get_samples_cached(self): - probe = {'id': 'A'} - probe.update(PROBE_DICT['probes']['A']) - cache = { - '%s-%s' % (ENDPOINT, self.pollster_cls.CACHE_KEY_PROBE): [probe], - } - self.manager._keystone = mock.Mock() - pollster = self.pollster_cls() - with mock.patch.object(pollster, '_get_probes') as do_not_call: - do_not_call.side_effect = AssertionError('should not be called') - samples = list(pollster.get_samples(self.manager, cache, - [ENDPOINT])) - self.assertEqual(1, len(samples)) - - -class TestPowerPollsterCache(TestEnergyPollsterCache): - pollster_cls = kwapi.PowerPollster diff --git a/ceilometer/tests/unit/event/test_converter.py b/ceilometer/tests/unit/event/test_converter.py deleted file mode 100644 index 37ae6702..00000000 --- a/ceilometer/tests/unit/event/test_converter.py +++ /dev/null @@ -1,781 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime - -import jsonpath_rw_ext -import mock -from oslo_config import fixture as fixture_config -import six - -from ceilometer import declarative -from ceilometer.event import converter -from ceilometer.event.storage import models -from ceilometer import service as ceilometer_service -from ceilometer.tests import base - - -class ConverterBase(base.BaseTestCase): - @staticmethod - def _create_test_notification(event_type, message_id, **kw): - return dict(event_type=event_type, - message_id=message_id, - priority="INFO", - publisher_id="compute.host-1-2-3", - timestamp="2013-08-08 21:06:37.803826", - payload=kw, - ) - - def assertIsValidEvent(self, event, notification): - self.assertIsNot( - None, event, - "Notification dropped unexpectedly:" - " %s" % str(notification)) - self.assertIsInstance(event, models.Event) - - def assertIsNotValidEvent(self, event, notification): - self.assertIs( - None, event, - "Notification NOT dropped when expected to be dropped:" - " %s" % str(notification)) - - def assertHasTrait(self, event, name, value=None, dtype=None): - traits = [trait for trait in event.traits if trait.name == name] - self.assertTrue( - len(traits) > 0, - "Trait %s not found in event %s" % (name, event)) - trait = traits[0] - if value is not None: - self.assertEqual(value, trait.value) - if dtype is not None: - self.assertEqual(dtype, trait.dtype) - if dtype == models.Trait.INT_TYPE: - self.assertIsInstance(trait.value, int) - elif dtype == models.Trait.FLOAT_TYPE: - self.assertIsInstance(trait.value, float) - elif dtype == models.Trait.DATETIME_TYPE: - self.assertIsInstance(trait.value, datetime.datetime) - elif dtype == models.Trait.TEXT_TYPE: - self.assertIsInstance(trait.value, six.string_types) - - def assertDoesNotHaveTrait(self, event, name): - traits = [trait for trait in event.traits if trait.name == name] - self.assertEqual( - len(traits), 0, - "Extra Trait %s found in event %s" % (name, event)) - - def assertHasDefaultTraits(self, event): - text = models.Trait.TEXT_TYPE - self.assertHasTrait(event, 'service', dtype=text) - - def _cmp_tree(self, this, other): - if hasattr(this, 'right') and hasattr(other, 'right'): - return (self._cmp_tree(this.right, other.right) and - self._cmp_tree(this.left, other.left)) - if not hasattr(this, 'right') and not hasattr(other, 'right'): - return this == other - return False - - def assertPathsEqual(self, path1, path2): - self.assertTrue(self._cmp_tree(path1, path2), - 'JSONPaths not equivalent %s %s' % (path1, path2)) - - -class TestTraitDefinition(ConverterBase): - - def setUp(self): - super(TestTraitDefinition, self).setUp() - self.n1 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0001", - instance_uuid="uuid-for-instance-0001", - instance_id="id-for-instance-0001", - instance_uuid2=None, - instance_id2=None, - host='host-1-2-3', - bogus_date='', - image_meta=dict( - disk_gb='20', - thing='whatzit'), - foobar=50) - - self.ext1 = mock.MagicMock(name='mock_test_plugin') - self.test_plugin_class = self.ext1.plugin - self.test_plugin = self.test_plugin_class() - 
self.test_plugin.trait_values.return_value = ['foobar'] - self.ext1.reset_mock() - - self.ext2 = mock.MagicMock(name='mock_nothing_plugin') - self.nothing_plugin_class = self.ext2.plugin - self.nothing_plugin = self.nothing_plugin_class() - self.nothing_plugin.trait_values.return_value = [None] - self.ext2.reset_mock() - - self.fake_plugin_mgr = dict(test=self.ext1, nothing=self.ext2) - - def test_to_trait_with_plugin(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_null_match_with_plugin(self): - cfg = dict(type='text', - fields=['payload.nothere', 'payload.bogus'], - plugin=dict(name='test')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with() - self.test_plugin.trait_values.assert_called_once_with([]) - - def test_to_trait_with_plugin_null(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='nothing')) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - self.nothing_plugin_class.assert_called_once_with() - self.nothing_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait_with_plugin_with_parameters(self): - cfg = dict(type='text', - fields=['payload.instance_id', 'payload.instance_uuid'], - plugin=dict(name='test', parameters=dict(a=1, b='foo'))) - - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('foobar', t.value) - self.test_plugin_class.assert_called_once_with(a=1, b='foo') - self.test_plugin.trait_values.assert_called_once_with([ - ('payload.instance_id', 'id-for-instance-0001'), - ('payload.instance_uuid', 'uuid-for-instance-0001')]) - - def test_to_trait(self): - cfg = dict(type='text', fields='payload.instance_id') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.TEXT_TYPE, t.dtype) - self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='int', fields='payload.image_meta.disk_gb') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('test_trait', t.name) - self.assertEqual(models.Trait.INT_TYPE, t.dtype) - self.assertEqual(20, t.value) - - def 
test_to_trait_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('id-for-instance-0001', t.value) - - cfg = dict(type='text', fields=['payload.instance_uuid', - 'payload.instance_id']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_multiple_different_nesting(self): - cfg = dict(type='int', fields=['payload.foobar', - 'payload.image_meta.disk_gb']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(50, t.value) - - cfg = dict(type='int', fields=['payload.image_meta.disk_gb', - 'payload.foobar']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual(20, t.value) - - def test_to_trait_some_null_multiple(self): - cfg = dict(type='text', fields=['payload.instance_id2', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_some_missing_multiple(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_uuid']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIsInstance(t, models.Trait) - self.assertEqual('uuid-for-instance-0001', t.value) - - def test_to_trait_missing(self): - cfg = dict(type='text', fields='payload.not_here_boss') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_null(self): - cfg = dict(type='text', fields='payload.instance_id2') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_empty_nontext(self): - cfg = dict(type='datetime', fields='payload.bogus_date') - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_to_trait_multiple_null_missing(self): - cfg = dict(type='text', fields=['payload.not_here_boss', - 'payload.instance_id2']) - tdef = converter.TraitDefinition('test_trait', cfg, - self.fake_plugin_mgr) - t = tdef.to_trait(self.n1) - self.assertIs(None, t) - - def test_missing_fields_config(self): - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - dict(), - self.fake_plugin_mgr) - - def test_string_fields_config(self): - cfg = dict(fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertPathsEqual(t.getter.__self__, - jsonpath_rw_ext.parse('payload.test')) - - def test_list_fields_config(self): - cfg = dict(fields=['payload.test', 'payload.other']) - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertPathsEqual( - t.getter.__self__, - jsonpath_rw_ext.parse('(payload.test)|(payload.other)')) - - def test_invalid_path_config(self): - # test 
invalid jsonpath... - cfg = dict(fields='payload.bogus(') - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - def test_invalid_plugin_config(self): - # test invalid plugin config... - cfg = dict(fields='payload.test', plugin=dict(bogus="true")) - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_unknown_plugin(self): - # test unknown plugin name... - cfg = dict(fields='payload.test', plugin=dict(name='bogus')) - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'test_trait', - cfg, - self.fake_plugin_mgr) - - def test_type_config(self): - cfg = dict(type='text', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.TEXT_TYPE, t.trait_type) - - cfg = dict(type='int', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.INT_TYPE, t.trait_type) - - cfg = dict(type='float', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.FLOAT_TYPE, t.trait_type) - - cfg = dict(type='datetime', fields='payload.test') - t = converter.TraitDefinition('test_trait', cfg, self.fake_plugin_mgr) - self.assertEqual(models.Trait.DATETIME_TYPE, t.trait_type) - - def test_invalid_type_config(self): - # test invalid trait type... - cfg = dict(type='bogus', fields='payload.test') - self.assertRaises(declarative.DefinitionException, - converter.TraitDefinition, - 'bogus_trait', - cfg, - self.fake_plugin_mgr) - - -class TestEventDefinition(ConverterBase): - - def setUp(self): - super(TestEventDefinition, self).setUp() - - self.traits_cfg = { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - } - - self.test_notification1 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - - self.test_notification2 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0002", - instance_id="uuid-for-instance-0002") - - self.test_notification3 = self._create_test_notification( - "test.thing", - "uuid-for-notif-0003", - instance_id="uuid-for-instance-0003", - host=None) - self.fake_plugin_mgr = {} - - def test_to_event(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification1) - self.assertEqual('test.thing', e.event_type) - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 6, 37, 803826), - e.generated) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'host', value='host-1-2-3', dtype=dtype) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0001', - dtype=dtype) - - def test_to_event_missing_trait(self): - dtype = models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification2) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0002', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_to_event_null_trait(self): - dtype = 
models.Trait.TEXT_TYPE - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - - e = edef.to_event(self.test_notification3) - - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id', - value='uuid-for-instance-0003', - dtype=dtype) - self.assertDoesNotHaveTrait(e, 'host') - - def test_bogus_cfg_no_traits(self): - bogus = dict(event_type='test.foo') - self.assertRaises(declarative.DefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_bogus_cfg_no_type(self): - bogus = dict(traits=self.traits_cfg) - self.assertRaises(declarative.DefinitionException, - converter.EventDefinition, - bogus, - self.fake_plugin_mgr) - - def test_included_type_string(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('test.thing', edef._included_types[0]) - self.assertEqual(0, len(edef._excluded_types)) - self.assertTrue(edef.included_type('test.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_included_type_list(self): - cfg = dict(event_type=['test.thing', 'other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(2, len(edef._included_types)) - self.assertEqual(0, len(edef._excluded_types)) - self.assertTrue(edef.included_type('test.thing')) - self.assertTrue(edef.included_type('other.thing')) - self.assertFalse(edef.excluded_type('test.thing')) - self.assertTrue(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.thing')) - - def test_excluded_type_string(self): - cfg = dict(event_type='!test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual('*', edef._included_types[0]) - self.assertEqual(1, len(edef._excluded_types)) - self.assertEqual('test.thing', edef._excluded_types[0]) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.included_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_excluded_type_list(self): - cfg = dict(event_type=['!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertTrue(edef.match_type('random.thing')) - - def test_mixed_type_list(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertEqual(1, len(edef._included_types)) - self.assertEqual(2, len(edef._excluded_types)) - self.assertTrue(edef.excluded_type('test.thing')) - self.assertTrue(edef.excluded_type('other.thing')) - self.assertFalse(edef.excluded_type('random.thing')) - 
self.assertFalse(edef.match_type('test.thing')) - self.assertFalse(edef.match_type('other.thing')) - self.assertFalse(edef.match_type('random.whatzit')) - self.assertTrue(edef.match_type('random.thing')) - - def test_catchall(self): - cfg = dict(event_type=['*.thing', '!test.thing', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*', '!other.thing'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertFalse(edef.is_catchall) - - cfg = dict(event_type=['*'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - cfg = dict(event_type=['*', 'foo'], - traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - self.assertTrue(edef.is_catchall) - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_extract_when(self, mock_utcnow): - now = datetime.datetime.utcnow() - modified = now + datetime.timedelta(minutes=1) - mock_utcnow.return_value = now - - body = {"timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - body = {"_context_timestamp": str(modified)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - then = now + datetime.timedelta(hours=1) - body = {"timestamp": str(modified), "_context_timestamp": str(then)} - when = converter.EventDefinition._extract_when(body) - self.assertTimestampEqual(modified, when) - - when = converter.EventDefinition._extract_when({}) - self.assertTimestampEqual(now, when) - - def test_default_traits(self): - cfg = dict(event_type='test.thing', traits={}) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS), - len(edef.traits)) - - def test_traits(self): - cfg = dict(event_type='test.thing', traits=self.traits_cfg) - edef = converter.EventDefinition(cfg, self.fake_plugin_mgr) - default_traits = converter.EventDefinition.DEFAULT_TRAITS.keys() - traits = set(edef.traits.keys()) - for dt in default_traits: - self.assertIn(dt, traits) - self.assertIn('host', traits) - self.assertIn('instance_id', traits) - self.assertEqual(len(converter.EventDefinition.DEFAULT_TRAITS) + 2, - len(edef.traits)) - - -class TestNotificationConverter(ConverterBase): - - def setUp(self): - super(TestNotificationConverter, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.valid_event_def1 = [{ - 'event_type': 'compute.instance.create.*', - 'traits': { - 'instance_id': { - 'type': 'text', - 'fields': ['payload.instance_uuid', - 'payload.instance_id'], - }, - 'host': { - 'type': 'text', - 'fields': 'payload.host', - }, - }, - }] - - self.test_notification1 = self._create_test_notification( - "compute.instance.create.start", - 
"uuid-for-notif-0001", - instance_id="uuid-for-instance-0001", - host='host-1-2-3') - self.test_notification2 = self._create_test_notification( - "bogus.notification.from.mars", - "uuid-for-notif-0002", - weird='true', - host='cydonia') - self.fake_plugin_mgr = {} - - @mock.patch('oslo_utils.timeutils.utcnow') - def test_converter_missing_keys(self, mock_utcnow): - # test a malformed notification - now = datetime.datetime.utcnow() - mock_utcnow.return_value = now - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - message = {'event_type': "foo", - 'message_id': "abc", - 'publisher_id': "1"} - e = c.to_event(message) - self.assertIsValidEvent(e, message) - self.assertEqual(1, len(e.traits)) - self.assertEqual("foo", e.event_type) - self.assertEqual(now, e.generated) - - def test_converter_with_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(2, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertDoesNotHaveTrait(e, 'instance_id') - self.assertDoesNotHaveTrait(e, 'host') - - def test_converter_without_catchall(self): - c = converter.NotificationEventsConverter( - self.valid_event_def1, - self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(3, len(e.traits)) - self.assertHasDefaultTraits(e) - self.assertHasTrait(e, 'instance_id') - self.assertHasTrait(e, 'host') - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - def test_converter_empty_cfg_with_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=True) - self.assertEqual(1, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsValidEvent(e, self.test_notification1) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - e = c.to_event(self.test_notification2) - self.assertIsValidEvent(e, self.test_notification2) - self.assertEqual(1, len(e.traits)) - self.assertHasDefaultTraits(e) - - def test_converter_empty_cfg_without_catchall(self): - c = converter.NotificationEventsConverter( - [], - self.fake_plugin_mgr, - add_catchall=False) - self.assertEqual(0, len(c.definitions)) - e = c.to_event(self.test_notification1) - self.assertIsNotValidEvent(e, self.test_notification1) - - e = c.to_event(self.test_notification2) - self.assertIsNotValidEvent(e, self.test_notification2) - - @staticmethod - def _convert_message(convert, level): - message = {'priority': level, 'event_type': "foo", - 'message_id': "abc", 'publisher_id': "1"} - return convert.to_event(message) - - def test_store_raw_all(self): - self.CONF.event.store_raw = ['info', 'error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only(self): - self.CONF.event.store_raw = ['info'] - c = 
converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_error_only(self): - self.CONF.event.store_raw = ['error'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertTrue(self._convert_message(c, 'error').raw) - - def test_store_raw_skip_all(self): - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_info_only_no_case(self): - self.CONF.event.store_raw = ['INFO'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_skip_all(self): - self.CONF.event.store_raw = ['unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertFalse(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_store_raw_bad_and_good(self): - self.CONF.event.store_raw = ['info', 'unknown'] - c = converter.NotificationEventsConverter( - [], self.fake_plugin_mgr) - self.assertTrue(self._convert_message(c, 'info').raw) - self.assertFalse(self._convert_message(c, 'error').raw) - - def test_setup_events_default_config(self): - self.CONF.set_override('definitions_cfg_file', - '/not/existing/file', group='event') - self.CONF.set_override('drop_unmatched_notifications', - False, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(1, len(c.definitions)) - self.assertTrue(c.definitions[0].is_catchall) - - self.CONF.set_override('drop_unmatched_notifications', - True, group='event') - - c = converter.setup_events(self.fake_plugin_mgr) - self.assertIsInstance(c, converter.NotificationEventsConverter) - self.assertEqual(0, len(c.definitions)) diff --git a/ceilometer/tests/unit/event/test_endpoint.py b/ceilometer/tests/unit/event/test_endpoint.py deleted file mode 100644 index c79d3b22..00000000 --- a/ceilometer/tests/unit/event/test_endpoint.py +++ /dev/null @@ -1,200 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
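# [Editor's note, not part of the original patch] The endpoint tests below pin
# two outcomes when a publisher raises and ack_on_event_error is False:
# REQUEUE when the failing publisher is the pipeline's only one ('Exit after
# error'), HANDLED when another publisher can still proceed ('Continue after
# error'). A hedged sketch of that mapping; the helper and its arguments are
# assumptions, not ceilometer's actual control flow:
import oslo_messaging


def notification_result(publish_failed, multi_publish, ack_on_event_error):
    """Map a publisher failure onto an oslo.messaging notification result."""
    if publish_failed and not ack_on_event_error and not multi_publish:
        return oslo_messaging.NotificationResult.REQUEUE
    return oslo_messaging.NotificationResult.HANDLED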
-"""Tests for Ceilometer notify daemon.""" - -import mock -from oslo_config import cfg -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslo_utils import fileutils -from oslotest import mockpatch -import six -import yaml - -from ceilometer.event import endpoint as event_endpoint -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test -from ceilometer.tests import base as tests_base - - -TEST_NOTICE_CTXT = { - u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'is_admin': True, - u'project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'quota_class': None, - u'read_deleted': u'no', - u'remote_address': u'10.0.2.15', - u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'roles': [u'admin'], - u'timestamp': u'2012-05-08T20:23:41.425105', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', -} - -TEST_NOTICE_METADATA = { - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -TEST_NOTICE_PAYLOAD = { - u'created_at': u'2012-05-08 20:23:41', - u'deleted_at': u'', - u'disk_gb': 0, - u'display_name': u'testme', - u'fixed_ips': [{u'address': u'10.0.0.2', - u'floating_ips': [], - u'meta': {}, - u'type': u'fixed', - u'version': 4}], - u'image_ref_url': u'http://10.0.2.15:9292/images/UUID', - u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1', - u'instance_type': u'm1.tiny', - u'instance_type_id': 2, - u'launched_at': u'2012-05-08 20:23:47.985999', - u'memory_mb': 512, - u'state': u'active', - u'state_description': u'', - u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e', - u'user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3', - u'vcpus': 1, - u'root_gb': 0, - u'ephemeral_gb': 0, - u'host': u'compute-host-name', - u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4', - u'os_type': u'linux?', - u'architecture': u'x86', - u'image_ref': u'UUID', - u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5', - u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6', -} - - -cfg.CONF.import_opt('store_events', 'ceilometer.notification', - group='notification') - - -class TestEventEndpoint(tests_base.BaseTestCase): - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test.TestPublisher, - 'except://': test.TestPublisher} - return fake_drivers[url](url) - - def _setup_pipeline(self, publishers): - ev_pipeline = yaml.dump({ - 'sources': [{ - 'name': 'test_event', - 'events': ['test.test'], - 'sinks': ['test_sink'] - }], - 'sinks': [{ - 'name': 'test_sink', - 'publishers': publishers - }] - }) - - if six.PY3: - ev_pipeline = ev_pipeline.encode('utf-8') - ev_pipeline_cfg_file = fileutils.write_to_tempfile( - content=ev_pipeline, prefix="event_pipeline", suffix="yaml") - self.CONF.set_override('event_pipeline_cfg_file', - ev_pipeline_cfg_file) - - ev_pipeline_mgr = pipeline.setup_event_pipeline() - return ev_pipeline_mgr - - def _setup_endpoint(self, publishers): - ev_pipeline_mgr = self._setup_pipeline(publishers) - self.endpoint = event_endpoint.EventsNotificationEndpoint( - ev_pipeline_mgr) - - self.endpoint.event_converter = mock.MagicMock() - self.endpoint.event_converter.to_event.return_value = mock.MagicMock( - event_type='test.test') - - def setUp(self): - super(TestEventEndpoint, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([]) - self.CONF.set_override("connection", "log://", group='database') - self.CONF.set_override("store_events", True, 
group="notification") - self.setup_messaging(self.CONF) - - self.useFixture(mockpatch.PatchObject(publisher, 'get_publisher', - side_effect=self.get_publisher)) - self.fake_publisher = mock.Mock() - self.useFixture(mockpatch.Patch( - 'ceilometer.publisher.test.TestPublisher', - return_value=self.fake_publisher)) - - def test_message_to_event(self): - self._setup_endpoint(['test://']) - self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - def test_bad_event_non_ack_and_requeue(self): - self._setup_endpoint(['test://']) - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - ret = self.endpoint.info([{'ctxt': TEST_NOTICE_CTXT, - 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'compute.instance.create.end', - 'payload': TEST_NOTICE_PAYLOAD, - 'metadata': TEST_NOTICE_METADATA}]) - - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) - - def test_message_to_event_bad_event(self): - self._setup_endpoint(['test://']) - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - - message = { - 'payload': {'event_type': "foo", 'message_id': "abc"}, - 'metadata': {}, - 'ctxt': {} - } - with mock.patch("ceilometer.pipeline.LOG") as mock_logger: - ret = self.endpoint.process_notification('info', [message]) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) - exception_mock = mock_logger.exception - self.assertIn('Exit after error from publisher', - exception_mock.call_args_list[0][0][0]) - - def test_message_to_event_bad_event_multi_publish(self): - - self._setup_endpoint(['test://', 'except://']) - - self.fake_publisher.publish_events.side_effect = Exception - self.CONF.set_override("ack_on_event_error", False, - group="notification") - - message = { - 'payload': {'event_type': "foo", 'message_id': "abc"}, - 'metadata': {}, - 'ctxt': {} - } - with mock.patch("ceilometer.pipeline.LOG") as mock_logger: - ret = self.endpoint.process_notification('info', [message]) - self.assertEqual(oslo_messaging.NotificationResult.HANDLED, ret) - exception_mock = mock_logger.exception - self.assertIn('Continue after error from publisher', - exception_mock.call_args_list[0][0][0]) diff --git a/ceilometer/tests/unit/event/test_trait_plugins.py b/ceilometer/tests/unit/event/test_trait_plugins.py deleted file mode 100644 index 6f8fe167..00000000 --- a/ceilometer/tests/unit/event/test_trait_plugins.py +++ /dev/null @@ -1,115 +0,0 @@ -# -# Copyright 2013 Rackspace Hosting. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslotest import base - -from ceilometer.event import trait_plugins - - -class TestSplitterPlugin(base.BaseTestCase): - - def setUp(self): - super(TestSplitterPlugin, self).setUp() - self.pclass = trait_plugins.SplitterTraitPlugin - - def test_split(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('test', value) - - param = dict(separator='-', segment=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('foobar', value) - - param = dict(separator='-', segment=1, max_split=1) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('foobar-baz', value) - - def test_no_sep(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test.foobar.baz')] - value = plugin.trait_values(match_list)[0] - self.assertEqual('test.foobar.baz', value) - - def test_no_segment(self): - param = dict(separator='-', segment=5) - plugin = self.pclass(**param) - match_list = [('test.thing', 'test-foobar-baz')] - value = plugin.trait_values(match_list)[0] - self.assertIs(None, value) - - def test_no_match(self): - param = dict(separator='-', segment=0) - plugin = self.pclass(**param) - match_list = [] - value = plugin.trait_values(match_list) - self.assertEqual([], value) - - -class TestBitfieldPlugin(base.BaseTestCase): - - def setUp(self): - super(TestBitfieldPlugin, self).setUp() - self.pclass = trait_plugins.BitfieldTraitPlugin - self.init = 0 - self.params = dict(initial_bitfield=self.init, - flags=[dict(path='payload.foo', bit=0, value=42), - dict(path='payload.foo', bit=1, value=12), - dict(path='payload.thud', bit=1, value=23), - dict(path='thingy.boink', bit=4), - dict(path='thingy.quux', bit=6, - value="wokka"), - dict(path='payload.bar', bit=10, - value='test')]) - - def test_bitfield(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x412, value[0]) - - def test_initial(self): - match_list = [('payload.foo', 12), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - self.params['initial_bitfield'] = 0x2000 - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x2412, value[0]) - - def test_no_match(self): - match_list = [] - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(self.init, value[0]) - - def test_multi(self): - match_list = [('payload.foo', 12), - ('payload.thud', 23), - ('payload.bar', 'test'), - ('thingy.boink', 'testagain')] - - plugin = self.pclass(**self.params) - value = plugin.trait_values(match_list) - self.assertEqual(0x412, value[0]) diff --git a/ceilometer/tests/unit/hardware/__init__.py b/ceilometer/tests/unit/hardware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/inspector/__init__.py b/ceilometer/tests/unit/hardware/inspector/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/inspector/test_inspector.py b/ceilometer/tests/unit/hardware/inspector/test_inspector.py deleted file mode 100644 index 22a245c4..00000000 --- 
a/ceilometer/tests/unit/hardware/inspector/test_inspector.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Copyright 2014 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_utils import netutils - -from ceilometer.hardware import inspector -from ceilometer.tests import base - - -class TestHardwareInspector(base.BaseTestCase): - def test_get_inspector(self): - url = netutils.urlsplit("snmp://") - driver = inspector.get_inspector(url) - self.assertTrue(driver) - - def test_get_inspector_illegal(self): - url = netutils.urlsplit("illegal://") - self.assertRaises(RuntimeError, - inspector.get_inspector, - url) diff --git a/ceilometer/tests/unit/hardware/inspector/test_snmp.py b/ceilometer/tests/unit/hardware/inspector/test_snmp.py deleted file mode 100644 index 71b94c02..00000000 --- a/ceilometer/tests/unit/hardware/inspector/test_snmp.py +++ /dev/null @@ -1,209 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
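# [Editor's note, not part of the original patch] The fixtures below fake the
# two pysnmp paths the inspector uses: getCmd for EXACT matching (one value
# per metric OID) and bulkCmd for PREFIX matching (every suffixed OID under
# the prefix, here .1 and .2). A hedged sketch of the selection those matching
# types imply; the helper name is invented:
def select_oids(walked, metric_oid, prefix=False):
    """Pick (oid, value) pairs for one metric from walked SNMP results."""
    if prefix:
        return [(o, v) for o, v in walked if o.startswith(metric_oid + '.')]
    return [(o, v) for o, v in walked if o == metric_oid]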
-"""Tests for ceilometer/hardware/inspector/snmp/inspector.py -""" -from oslo_utils import netutils -from oslotest import mockpatch - -from ceilometer.hardware.inspector import snmp -from ceilometer.tests import base as test_base - -ins = snmp.SNMPInspector - - -class FakeObjectName(object): - def __init__(self, name): - self.name = name - - def __str__(self): - return str(self.name) - - -def faux_getCmd_new(authData, transportTarget, *oids, **kwargs): - varBinds = [(FakeObjectName(oid), - int(oid.split('.')[-1])) for oid in oids] - return (None, None, 0, varBinds) - - -def faux_bulkCmd_new(authData, transportTarget, nonRepeaters, maxRepetitions, - *oids, **kwargs): - varBindTable = [ - [(FakeObjectName(oid + ".%d" % i), i) for i in range(1, 3)] - for oid in oids - ] - return (None, None, 0, varBindTable) - - -class TestSNMPInspector(test_base.BaseTestCase): - mapping = { - 'test_exact': { - 'matching_type': snmp.EXACT, - 'metric_oid': ('1.3.6.1.4.1.2021.10.1.3.1', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.10.1.3.8', int) - }, - 'post_op': '_fake_post_op', - }, - 'test_prefix': { - 'matching_type': snmp.PREFIX, - 'metric_oid': ('1.3.6.1.4.1.2021.9.1.8', int), - 'metadata': { - 'meta': ('1.3.6.1.4.1.2021.9.1.3', int) - }, - 'post_op': None, - }, - } - - def setUp(self): - super(TestSNMPInspector, self).setUp() - self.inspector = snmp.SNMPInspector() - self.host = netutils.urlsplit("snmp://localhost") - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'getCmd', new=faux_getCmd_new)) - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - - def test_snmp_error(self): - def get_list(func, *args, **kwargs): - return list(func(*args, **kwargs)) - - def faux_parse(ret, is_bulk): - return (True, 'forced error') - - self.useFixture(mockpatch.PatchObject( - snmp, 'parse_snmp_return', new=faux_parse)) - - self.assertRaises(snmp.SNMPException, - get_list, - self.inspector.inspect_generic, - host=self.host, - cache={}, - extra_metadata={}, - param=self.mapping['test_exact']) - - @staticmethod - def _fake_post_op(host, cache, meter_def, value, metadata, extra, suffix): - metadata.update(post_op_meta=4) - extra.update(project_id=2) - return value - - def test_inspect_generic_exact(self): - self.inspector._fake_post_op = self._fake_post_op - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - cache, - {}, - self.mapping['test_exact'])) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.10.1.3.1', keys) - self.assertIn('1.3.6.1.4.1.2021.10.1.3.8', keys) - self.assertEqual(1, len(ret)) - self.assertEqual(1, ret[0][0]) - self.assertEqual(8, ret[0][1]['meta']) - self.assertEqual(4, ret[0][1]['post_op_meta']) - self.assertEqual(2, ret[0][2]['project_id']) - - def test_inspect_generic_prefix(self): - cache = {} - ret = list(self.inspector.inspect_generic(self.host, - cache, - {}, - self.mapping['test_prefix'])) - keys = cache[ins._CACHE_KEY_OID].keys() - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.8' + '.2', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.1', keys) - self.assertIn('1.3.6.1.4.1.2021.9.1.3' + '.2', keys) - self.assertEqual(2, len(ret)) - self.assertIn(ret[0][0], (1, 2)) - self.assertEqual(ret[0][0], ret[0][1]['meta']) - - def test_post_op_net(self): - self.useFixture(mockpatch.PatchObject( - self.inspector._cmdGen, 'bulkCmd', new=faux_bulkCmd_new)) - cache = {} - metadata = dict(name='lo', - speed=0, - mac='ba21e43302fe') - extra = {} - 
ret = self.inspector._post_op_net(self.host, cache, None, - value=8, - metadata=metadata, - extra=extra, - suffix=".2") - self.assertEqual(8, ret) - self.assertIn('ip', metadata) - self.assertIn("2", metadata['ip']) - self.assertIn('resource_id', extra) - self.assertEqual("localhost.lo", extra['resource_id']) - - def test_post_op_disk(self): - cache = {} - metadata = dict(device='/dev/sda1', - path='/') - extra = {} - ret = self.inspector._post_op_disk(self.host, cache, None, - value=8, - metadata=metadata, - extra=extra, - suffix=None) - self.assertEqual(8, ret) - self.assertIn('resource_id', extra) - self.assertEqual("localhost./dev/sda1", extra['resource_id']) - - def test_prepare_params(self): - param = {'post_op': '_post_op_disk', - 'oid': '1.3.6.1.4.1.2021.9.1.6', - 'type': 'int', - 'matching_type': 'type_prefix', - 'metadata': { - 'device': {'oid': '1.3.6.1.4.1.2021.9.1.3', - 'type': 'str'}, - 'path': {'oid': '1.3.6.1.4.1.2021.9.1.2', - 'type': "lambda x: str(x)"}}} - processed = self.inspector.prepare_params(param) - self.assertEqual('_post_op_disk', processed['post_op']) - self.assertEqual('1.3.6.1.4.1.2021.9.1.6', processed['metric_oid'][0]) - self.assertEqual(int, processed['metric_oid'][1]) - self.assertEqual(snmp.PREFIX, processed['matching_type']) - self.assertEqual(2, len(processed['metadata'].keys())) - self.assertEqual('1.3.6.1.4.1.2021.9.1.2', - processed['metadata']['path'][0]) - self.assertEqual("4", - processed['metadata']['path'][1](4)) - - def test_pysnmp_ver43(self): - # Test pysnmp version >=4.3 compatibility of ObjectIdentifier - from distutils.version import StrictVersion - import pysnmp - - has43 = StrictVersion(pysnmp.__version__) >= StrictVersion('4.3.0') - oid = '1.3.6.4.1.2021.11.57.0' - - if has43: - from pysnmp.entity import engine - from pysnmp.smi import rfc1902 - from pysnmp.smi import view - snmp_engine = engine.SnmpEngine() - mvc = view.MibViewController(snmp_engine.getMibBuilder()) - name = rfc1902.ObjectIdentity(oid) - name.resolveWithMib(mvc) - else: - from pysnmp.proto import rfc1902 - name = rfc1902.ObjectName(oid) - - self.assertEqual(oid, str(name)) diff --git a/ceilometer/tests/unit/hardware/pollsters/__init__.py b/ceilometer/tests/unit/hardware/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/hardware/pollsters/test_generic.py b/ceilometer/tests/unit/hardware/pollsters/test_generic.py deleted file mode 100644 index 35d31727..00000000 --- a/ceilometer/tests/unit/hardware/pollsters/test_generic.py +++ /dev/null @@ -1,185 +0,0 @@ -# -# Copyright 2015 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
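# [Editor's note, not part of the original patch] The MeterDefinition tests
# below pin two validation rules: 'unit' is required ("Missing field unit")
# and the sample type must be a known value ("Unrecognized type value
# invalid"). A minimal sketch of such declarative validation; the class and
# the ValueError stand in for ceilometer's MeterDefinitionException:
class MeterDefinitionSketch(object):
    REQUIRED_FIELDS = ('name', 'type', 'unit')
    SAMPLE_TYPES = ('gauge', 'delta', 'cumulative')

    def __init__(self, cfg):
        for field in self.REQUIRED_FIELDS:
            if field not in cfg:
                raise ValueError("Missing field %s" % field)
        if cfg['type'] not in self.SAMPLE_TYPES:
            raise ValueError("Unrecognized type value %s" % cfg['type'])
        self.__dict__.update(cfg)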
- -import mock -import six -import yaml - -from oslo_config import fixture as fixture_config -from oslo_utils import fileutils -from oslotest import mockpatch - -from ceilometer import declarative -from ceilometer.hardware.inspector import base as inspector_base -from ceilometer.hardware.pollsters import generic -from ceilometer import sample -from ceilometer.tests import base as test_base - - -class TestMeterDefinition(test_base.BaseTestCase): - def test_config_definition(self): - cfg = dict(name='test', - type='gauge', - unit='B', - snmp_inspector={}) - definition = generic.MeterDefinition(cfg) - self.assertEqual('test', definition.name) - self.assertEqual('gauge', definition.type) - self.assertEqual('B', definition.unit) - self.assertEqual({}, definition.snmp_inspector) - - def test_config_missing_field(self): - cfg = dict(name='test', type='gauge') - try: - generic.MeterDefinition(cfg) - except declarative.MeterDefinitionException as e: - self.assertEqual("Missing field unit", e.brief_message) - - def test_config_invalid_field(self): - cfg = dict(name='test', - type='gauge', - unit='B', - invalid={}) - definition = generic.MeterDefinition(cfg) - self.assertEqual("foobar", getattr(definition, 'invalid', 'foobar')) - - def test_config_invalid_type_field(self): - cfg = dict(name='test', - type='invalid', - unit='B', - snmp_inspector={}) - try: - generic.MeterDefinition(cfg) - except declarative.MeterDefinitionException as e: - self.assertEqual("Unrecognized type value invalid", - e.brief_message) - - @mock.patch('ceilometer.hardware.pollsters.generic.LOG') - def test_bad_metric_skip(self, LOG): - cfg = {'metric': [dict(name='test1', - type='gauge', - unit='B', - snmp_inspector={}), - dict(name='test_bad', - type='invalid', - unit='B', - snmp_inspector={}), - dict(name='test2', - type='gauge', - unit='B', - snmp_inspector={})]} - data = generic.load_definition(cfg) - self.assertEqual(2, len(data)) - LOG.error.assert_called_with( - "Error loading meter definition: %s", - "Unrecognized type value invalid") - - -class FakeInspector(inspector_base.Inspector): - net_metadata = dict(name='test.teest', - mac='001122334455', - ip='10.0.0.2', - speed=1000) - DATA = { - 'test': (0.99, {}, {}), - 'test2': (90, net_metadata, {}), - } - - def inspect_generic(self, host, cache, - extra_metadata=None, param=None): - yield self.DATA[host.hostname] - - -class TestGenericPollsters(test_base.BaseTestCase): - @staticmethod - def faux_get_inspector(url, namespace=None): - return FakeInspector() - - def setUp(self): - super(TestGenericPollsters, self).setUp() - self.conf = self.useFixture(fixture_config.Config()).conf - self.resources = ["snmp://test", "snmp://test2"] - self.useFixture(mockpatch.Patch( - 'ceilometer.hardware.inspector.get_inspector', - self.faux_get_inspector)) - self.conf(args=[]) - self.pollster = generic.GenericHardwareDeclarativePollster() - - def _setup_meter_def_file(self, cfg): - if six.PY3: - cfg = cfg.encode('utf-8') - meter_cfg_file = fileutils.write_to_tempfile(content=cfg, - prefix="snmp", - suffix="yaml") - self.conf.set_override( - 'meter_definitions_file', - meter_cfg_file, group='hardware') - cfg = declarative.load_definitions( - {}, self.conf.hardware.meter_definitions_file) - return cfg - - def _check_get_samples(self, name, definition, - expected_value, expected_type, expected_unit=None): - self.pollster._update_meter_definition(definition) - cache = {} - samples = list(self.pollster.get_samples(None, cache, - self.resources)) - self.assertTrue(samples) - 
self.assertIn(self.pollster.CACHE_KEY, cache) - for resource in self.resources: - self.assertIn(resource, cache[self.pollster.CACHE_KEY]) - - self.assertEqual(set([name]), - set([s.name for s in samples])) - match = [s for s in samples if s.name == name] - self.assertEqual(expected_value, match[0].volume) - self.assertEqual(expected_type, match[0].type) - if expected_unit: - self.assertEqual(expected_unit, match[0].unit) - - def test_get_samples(self): - param = dict(matching_type='type_exact', - oid='1.3.6.1.4.1.2021.10.1.3.1', - type='lambda x: float(str(x))') - meter_def = generic.MeterDefinition(dict(type='gauge', - name='hardware.test1', - unit='process', - snmp_inspector=param)) - self._check_get_samples('hardware.test1', - meter_def, - 0.99, sample.TYPE_GAUGE, - expected_unit='process') - - def test_get_pollsters_extensions(self): - param = dict(matching_type='type_exact', - oid='1.3.6.1.4.1.2021.10.1.3.1', - type='lambda x: float(str(x))') - meter_cfg = yaml.dump( - {'metric': [dict(type='gauge', - name='hardware.test1', - unit='process', - snmp_inspector=param), - dict(type='gauge', - name='hardware.test2.abc', - unit='process', - snmp_inspector=param)]}) - self._setup_meter_def_file(meter_cfg) - pollster = generic.GenericHardwareDeclarativePollster - # Clear cached mapping - pollster.mapping = None - exts = pollster.get_pollsters_extensions() - self.assertEqual(2, len(exts)) - self.assertIn(exts[0].name, ['hardware.test1', 'hardware.test2.abc']) - self.assertIn(exts[1].name, ['hardware.test1', 'hardware.test2.abc']) diff --git a/ceilometer/tests/unit/hardware/pollsters/test_util.py b/ceilometer/tests/unit/hardware/pollsters/test_util.py deleted file mode 100644 index a31cdbb3..00000000 --- a/ceilometer/tests/unit/hardware/pollsters/test_util.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# Copyright 2013 Intel Corp -# -# Authors: Lianhao Lu -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
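# [Editor's note, not part of the original patch] The util tests below check
# how make_sample_from_host resolves identity: an explicit
# extra['resource_id'] wins, otherwise the SNMP host name is used, and the
# host URL always lands in the sample's resource metadata. A small sketch of
# the fallback; the helper name is hypothetical:
def resolve_resource_id(host_url, extra=None):
    """Prefer an explicit resource_id, else fall back to the host name."""
    extra = extra or {}
    return extra.get('resource_id') or host_url.hostname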
- -from oslo_utils import netutils - -from ceilometer.hardware.pollsters import util -from ceilometer import sample -from ceilometer.tests import base as test_base - - -class TestPollsterUtils(test_base.BaseTestCase): - def setUp(self): - super(TestPollsterUtils, self).setUp() - self.host_url = netutils.urlsplit("snmp://127.0.0.1:161") - - def test_make_sample(self): - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - res_metadata={ - 'metakey': 'metaval', - }) - self.assertEqual('127.0.0.1', s.resource_id) - self.assertIn('snmp://127.0.0.1:161', s.resource_metadata.values()) - self.assertIn('metakey', s.resource_metadata.keys()) - - def test_make_sample_extra(self): - extra = { - 'project_id': 'project', - 'resource_id': 'resource' - } - s = util.make_sample_from_host(self.host_url, - name='test', - sample_type=sample.TYPE_GAUGE, - unit='B', - volume=1, - extra=extra) - self.assertIsNone(s.user_id) - self.assertEqual('project', s.project_id) - self.assertEqual('resource', s.resource_id) - self.assertEqual({'resource_url': 'snmp://127.0.0.1:161', - 'project_id': 'project', - 'resource_id': - 'resource'}, - s.resource_metadata) diff --git a/ceilometer/tests/unit/image/__init__.py b/ceilometer/tests/unit/image/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/image/test_glance.py b/ceilometer/tests/unit/image/test_glance.py deleted file mode 100644 index c9a16cb9..00000000 --- a/ceilometer/tests/unit/image/test_glance.py +++ /dev/null @@ -1,227 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
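# [Editor's note, not part of the original patch] TestImagePollsterPageSize
# below asserts that glance_page_size is forwarded to images.list() only when
# it is positive; zero or negative values fall back to the client default.
# A sketch of that keyword construction; the helper name is invented:
def glance_list_kwargs(page_size):
    """Build images.list() keyword arguments for a configured page size."""
    kwargs = {'filters': {'is_public': None}}
    if page_size > 0:
        kwargs['page_size'] = page_size
    return kwargs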
- -import mock -from oslo_config import fixture as fixture_config -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.image import glance - -IMAGE_LIST = [ - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'1d21a8d0-25f4-4e0a-b4ec-85f40237676b', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), - type('Image', (object,), - {u'status': u'active', - u'name': "hello world", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:27:41', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:27:41', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'22be9f90-864d-494c-aa74-8035fd535989', - u'location': None, - u'checksum': None, - u'owner': u'9e4f98287a0246daa42eaf4025db99d4', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 0}), - type('Image', (object,), - {u'status': u'queued', - u'name': None, - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:23:27', - u'disk_format': "raw", - u'updated_at': u'2012-09-18T16:23:27', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'8d133f6c-38a8-403c-b02c-7071b69b432d', - u'location': None, - u'checksum': None, - u'owner': u'5f8806a76aa34ee8b8fc8397bd154319', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 1024}), - type('Image', (object,), - {u'status': u'queued', - u'name': "some name", - u'deleted': False, - u'container_format': None, - u'created_at': u'2012-09-18T16:29:46', - u'disk_format': None, - u'updated_at': u'2012-09-18T16:29:46', - u'properties': {}, - u'min_disk': 0, - u'protected': False, - u'id': u'e753b196-49b4-48e8-8ca5-09ebd9805f40', - u'location': None, - u'checksum': None, - u'owner': u'4c8364fc20184ed7971b76602aa96184', - u'is_public': True, - u'deleted_at': None, - u'min_ram': 0, - u'size': 2048}), -] - -ENDPOINT = 'end://point' - - -class _BaseObject(object): - pass - - -class FakeGlanceClient(object): - class images(object): - pass - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.Mock() - access = self._keystone.session.auth.get_access.return_value - access.service_catalog.get_endpoints = mock.Mock( - return_value={'image': mock.ANY}) - - -class TestImagePollsterPageSize(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = FakeGlanceClient() - glanceclient.images.list = mock.MagicMock(return_value=IMAGE_LIST) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollsterPageSize, self).setUp() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def _do_test_iter_images(self, page_size=0, length=0): - self.CONF.set_override("glance_page_size", page_size) - images = list(glance.ImagePollster(). 
- _iter_images(self.manager.keystone, {}, ENDPOINT)) - kwargs = {} - if page_size > 0: - kwargs['page_size'] = page_size - FakeGlanceClient.images.list.assert_called_with( - filters={'is_public': None}, **kwargs) - self.assertEqual(length, len(images)) - - def test_page_size(self): - self._do_test_iter_images(100, 4) - - def test_page_size_default(self): - self._do_test_iter_images(length=4) - - def test_page_size_negative_number(self): - self._do_test_iter_images(-1, 4) - - -class TestImagePollster(base.BaseTestCase): - - @staticmethod - def fake_get_glance_client(ksclient, endpoint): - glanceclient = _BaseObject() - setattr(glanceclient, "images", _BaseObject()) - setattr(glanceclient.images, - "list", lambda *args, **kwargs: iter(IMAGE_LIST)) - return glanceclient - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestImagePollster, self).setUp() - self.manager = TestManager() - self.useFixture(mockpatch.PatchObject( - glance._Base, 'get_glance_client', - side_effect=self.fake_get_glance_client)) - - def test_default_discovery(self): - pollster = glance.ImagePollster() - self.assertEqual('endpoint:image', pollster.default_discovery) - - def test_iter_images(self): - # Tests whether the iter_images method returns a unique image - # list when there is nothing in the cache - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, {}, ENDPOINT)) - self.assertEqual(len(set(image.id for image in images)), len(images)) - - def test_iter_images_cached(self): - # Tests whether the iter_images method returns the values from - # the cache - cache = {'%s-images' % ENDPOINT: []} - images = list(glance.ImagePollster(). - _iter_images(self.manager.keystone, cache, - ENDPOINT)) - self.assertEqual([], images) - - def test_image(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for sample in samples: - self.assertEqual(1, sample.volume) - - def test_image_size(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(4, len(samples)) - for image in IMAGE_LIST: - self.assertTrue( - any(map(lambda sample: sample.volume == image.size, - samples))) - - def test_image_get_sample_names(self): - samples = list(glance.ImagePollster().get_samples(self.manager, {}, - [ENDPOINT])) - self.assertEqual(set(['image']), set([s.name for s in samples])) - - def test_image_size_get_sample_names(self): - samples = list(glance.ImageSizePollster().get_samples(self.manager, - {}, - [ENDPOINT])) - self.assertEqual(set(['image.size']), set([s.name for s in samples])) diff --git a/ceilometer/tests/unit/ipmi/__init__.py b/ceilometer/tests/unit/ipmi/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/notifications/__init__.py b/ceilometer/tests/unit/ipmi/notifications/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py b/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py deleted file mode 100644 index 583219fe..00000000 --- a/ceilometer/tests/unit/ipmi/notifications/ipmi_test_data.py +++ /dev/null @@ -1,795 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Sample data for test_ipmi. - -This data is provided as a sample of the data expected from the ipmitool -driver in the Ironic project, which is the publisher of the notifications -being tested. -""" - - -TEMPERATURE_DATA = { - 'DIMM GH VR Temp (0x3b)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.6 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM GH VR Temp (0x3b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU1 VR Temp (0x36)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '32 (+/- 0.500) degrees C', - 'Entity ID': '20.1 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU1 VR Temp (0x36)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM EF VR Temp (0x3a)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '26 (+/- 0.500) degrees C', - 'Entity ID': '20.5 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM EF VR Temp (0x3a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'CPU2 VR Temp (0x37)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '31 (+/- 0.500) degrees C', - 'Entity ID': '20.2 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 
'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'CPU2 VR Temp (0x37)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'Ambient Temp (0x32)': { - 'Status': 'ok', - 'Sensor Reading': '25 (+/- 0) degrees C', - 'Entity ID': '12.1 (Front Panel Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Upper non-critical': '43.000', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Upper non-recoverable': '50.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '46.000', - 'Sensor ID': 'Ambient Temp (0x32)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'Mezz Card Temp (0x35)': { - 'Status': 'Disabled', - 'Sensor Reading': 'Disabled', - 'Entity ID': '44.1 (I/O Module)', - 'Event Message Control': 'Per-threshold', - 'Upper non-critical': '70.000', - 'Upper non-recoverable': '85.000', - 'Positive Hysteresis': '4.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'Mezz Card Temp (0x35)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '25.000' - }, - 'PCH Temp (0x3c)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '46 (+/- 0.500) degrees C', - 'Entity ID': '45.1 (Processor/IO Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '93.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '103.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '98.000', - 'Sensor ID': 'PCH Temp (0x3c)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM CD VR Temp (0x39)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '27 (+/- 0.500) degrees C', - 'Entity ID': '20.4 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM CD VR Temp (0x39)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 2 Temp (0x34)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '30 (+/- 0) degrees C', - 'Entity ID': '16.2 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ 
ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 2 Temp (0x34)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'DIMM AB VR Temp (0x38)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '28 (+/- 0.500) degrees C', - 'Entity ID': '20.3 (Power Module)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '95.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '105.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '100.000', - 'Sensor ID': 'DIMM AB VR Temp (0x38)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': '38 (+/- 0) degrees C', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, -} - - -CURRENT_DATA = { - 'Avg Power (0x2e)': { - 'Status': 'ok', - 'Sensor Reading': '130 (+/- 0) Watts', - 'Entity ID': '21.0 (Power Management)', - 'Assertions Enabled': '', - 'Event Message Control': 'Per-threshold', - 'Readable Thresholds': 'No Thresholds', - 'Positive Hysteresis': 'Unspecified', - 'Sensor Type (Analog)': 'Current', - 'Negative Hysteresis': 'Unspecified', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Avg Power (0x2e)', - 'Assertion Events': '', - 'Minimum sensor range': '2550.000', - 'Settable Thresholds': 'No Thresholds' - } -} - - -FAN_DATA = { - 'Fan 4A Tach (0x46)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4A Tach (0x46)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 5A Tach (0x48)': { - 
'Status': 'ok', - 'Sensor Reading': '7140 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5A Tach (0x48)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3A Tach (0x44)': { - 'Status': 'ok', - 'Sensor Reading': '6900 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3A Tach (0x44)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 1A Tach (0x40)': { - 'Status': 'ok', - 'Sensor Reading': '6960 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1A Tach (0x40)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 3B Tach (0x45)': { - 'Status': 'ok', - 'Sensor Reading': '7104 (+/- 0) RPM', - 'Entity ID': '29.3 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 3B Tach (0x45)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2A Tach (0x42)': { - 'Status': 'ok', - 'Sensor Reading': '7080 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2A Tach (0x42)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - }, - 'Fan 4B Tach (0x47)': { - 'Status': 'ok', - 
'Sensor Reading': '7488 (+/- 0) RPM', - 'Entity ID': '29.4 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 4B Tach (0x47)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 2B Tach (0x43)': { - 'Status': 'ok', - 'Sensor Reading': '7168 (+/- 0) RPM', - 'Entity ID': '29.2 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 2B Tach (0x43)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 5B Tach (0x49)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.5 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 5B Tach (0x49)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 1B Tach (0x41)': { - 'Status': 'ok', - 'Sensor Reading': '7296 (+/- 0) RPM', - 'Entity ID': '29.1 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 1B Tach (0x41)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6B Tach (0x4b)': { - 'Status': 'ok', - 'Sensor Reading': '7616 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2752.000', - 'Positive Hysteresis': '128.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '16320.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '128.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6B Tach (0x4b)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3968.000' - }, - 'Fan 6A Tach (0x4a)': { - 'Status': 'ok', - 'Sensor Reading': 
'7080 (+/- 0) RPM', - 'Entity ID': '29.6 (Fan Device)', - 'Assertions Enabled': 'lcr-', - 'Normal Minimum': '2580.000', - 'Positive Hysteresis': '120.000', - 'Assertion Events': '', - 'Event Message Control': 'Per-threshold', - 'Normal Maximum': '15300.000', - 'Deassertions Enabled': 'lcr-', - 'Sensor Type (Analog)': 'Fan', - 'Lower critical': '1920.000', - 'Negative Hysteresis': '120.000', - 'Threshold Read Mask': 'lcr', - 'Maximum sensor range': 'Unspecified', - 'Readable Thresholds': 'lcr', - 'Sensor ID': 'Fan 6A Tach (0x4a)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4020.000' - } -} - - -VOLTAGE_DATA = { - 'Planar 12V (0x18)': { - 'Status': 'ok', - 'Sensor Reading': '12.312 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.108', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '10.692', - 'Negative Hysteresis': '0.108', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '13.446', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 12V (0x18)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '12.042' - }, - 'Planar 3.3V (0x16)': { - 'Status': 'ok', - 'Sensor Reading': '3.309 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.028', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '3.039', - 'Negative Hysteresis': '0.028', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '3.564', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 3.3V (0x16)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.309' - }, - 'Planar VBAT (0x1c)': { - 'Status': 'ok', - 'Sensor Reading': '3.137 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lnc- lcr-', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Readable Thresholds': 'lcr lnc', - 'Positive Hysteresis': '0.025', - 'Deassertions Enabled': 'lnc- lcr-', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '2.095', - 'Negative Hysteresis': '0.025', - 'Lower non-critical': '2.248', - 'Maximum sensor range': 'Unspecified', - 'Sensor ID': 'Planar VBAT (0x1c)', - 'Settable Thresholds': 'lcr lnc', - 'Threshold Read Mask': 'lcr lnc', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '3.010' - }, - 'Planar 5V (0x17)': { - 'Status': 'ok', - 'Sensor Reading': '5.062 (+/- 0) Volts', - 'Entity ID': '7.1 (System Board)', - 'Assertions Enabled': 'lcr- ucr+', - 'Event Message Control': 'Per-threshold', - 'Assertion Events': '', - 'Maximum sensor range': 'Unspecified', - 'Positive Hysteresis': '0.045', - 'Deassertions Enabled': 'lcr- ucr+', - 'Sensor Type (Analog)': 'Voltage', - 'Lower critical': '4.475', - 'Negative Hysteresis': '0.045', - 'Threshold Read Mask': 'lcr ucr', - 'Upper critical': '5.582', - 'Readable Thresholds': 'lcr ucr', - 'Sensor ID': 'Planar 5V (0x17)', - 'Settable Thresholds': 'lcr ucr', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '4.995' - } -} - - -SENSOR_DATA = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 
'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': TEMPERATURE_DATA, - 'Current': CURRENT_DATA, - 'Fan': FAN_DATA, - 'Voltage': VOLTAGE_DATA - } - } -} - - -EMPTY_PAYLOAD = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - } - } -} - - -MISSING_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -BAD_SENSOR = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Status': 'ok', - 'Deassertions Enabled': 'unc+ ucr+ unr+', - 'Sensor Reading': 'some bad stuff', - 'Entity ID': '16.1 (System Internal Expansion Board)', - 'Assertions Enabled': 'unc+ ucr+ unr+', - 'Positive Hysteresis': '4.000', - 'Assertion Events': '', - 'Upper non-critical': '70.000', - 'Event Message Control': 'Per-threshold', - 'Upper non-recoverable': '85.000', - 'Normal Maximum': '112.000', - 'Maximum sensor range': 'Unspecified', - 'Sensor Type (Analog)': 'Temperature', - 'Readable Thresholds': 'unc ucr unr', - 'Negative Hysteresis': 'Unspecified', - 'Threshold Read Mask': 'unc ucr unr', - 'Upper critical': '80.000', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - 'Settable Thresholds': '', - 'Minimum sensor range': 'Unspecified', - 'Nominal Reading': '16.000' - }, - } - } - } -} - - -NO_SENSOR_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'node_uuid': 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 
'Sensor Reading': '26 C', - }, - } - } -} - - -NO_NODE_ID = { - 'message_id': 'f22188ca-c068-47ce-a3e5-0e27ffe234c6', - 'publisher_id': 'f23188ca-c068-47ce-a3e5-0e27ffe234c6', - 'payload': { - 'instance_uuid': 'f11251ax-c568-25ca-4582-0x27add644c6', - 'timestamp': '20140223134852', - 'event_type': 'hardware.ipmi.metrics.update', - 'payload': { - 'Temperature': { - 'PCI Riser 1 Temp (0x33)': { - 'Sensor Reading': '26 C', - 'Sensor ID': 'PCI Riser 1 Temp (0x33)', - }, - } - } - } -} diff --git a/ceilometer/tests/unit/ipmi/notifications/test_ironic.py b/ceilometer/tests/unit/ipmi/notifications/test_ironic.py deleted file mode 100644 index 432e0b19..00000000 --- a/ceilometer/tests/unit/ipmi/notifications/test_ironic.py +++ /dev/null @@ -1,214 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for producing IPMI sample messages from notification events. -""" - -import mock -from oslotest import base - -from ceilometer.ipmi.notifications import ironic as ipmi -from ceilometer import sample -from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data - - -class TestNotifications(base.BaseTestCase): - - def test_ipmi_temperature_notification(self): - """Test IPMI Temperature sensor data. - - Based on the above ipmi_test_data the expected sample for a single - temperature reading has:: - - * a resource_id composed from the node_uuid and Sensor ID - * a name composed from 'hardware.ipmi.' and 'temperature' - * a volume from the first chunk of the Sensor Reading - * a unit from the last chunk of the Sensor Reading - * some readings are skipped if the value is 'Disabled' - * metadata with the node id - """ - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-dimm_gh_vr_temp_(0x3b)' - ) - test_counter = counters[resource_id] - self.assertEqual(26.0, test_counter.volume) - self.assertEqual('C', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.temperature', test_counter.name) - self.assertEqual('hardware.ipmi.metrics.update', - test_counter.resource_metadata['event_type']) - self.assertEqual('f4982fd2-2f2b-4bb5-9aff-48aac801d1ad', - test_counter.resource_metadata['node']) - - def test_ipmi_current_notification(self): - """Test IPMI Current sensor data. - - A single current reading is effectively the same as temperature, - modulo "current". - """ - processor = ipmi.CurrentSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(1, len(counters), 'expected 1 current reading') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-avg_power_(0x2e)' - ) - test_counter = counters[resource_id] - self.assertEqual(130.0, test_counter.volume) - self.assertEqual('W', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.current', test_counter.name) - - def test_ipmi_fan_notification(self): - """Test IPMI Fan sensor data. - - A single fan reading is effectively the same as temperature, - modulo "fan". - """ - processor = ipmi.FanSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(12, len(counters), 'expected 12 fan readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-fan_4a_tach_(0x46)' - ) - test_counter = counters[resource_id] - self.assertEqual(6900.0, test_counter.volume) - self.assertEqual('RPM', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.fan', test_counter.name) - - def test_ipmi_voltage_notification(self): - """Test IPMI Voltage sensor data. - - A single voltage reading is effectively the same as temperature, - modulo "voltage". - """ - processor = ipmi.VoltageSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(4, len(counters), 'expected 4 voltage readings') - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-planar_vbat_(0x1c)' - ) - test_counter = counters[resource_id] - self.assertEqual(3.137, test_counter.volume) - self.assertEqual('V', test_counter.unit) - self.assertEqual(sample.TYPE_GAUGE, test_counter.type) - self.assertEqual('hardware.ipmi.voltage', test_counter.name) - - def test_disabled_skips_metric(self): - """Test that a meter with a disabled volume is skipped.""" - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.SENSOR_DATA)]) - - self.assertEqual(10, len(counters), - 'expected 10 temperature readings') - - resource_id = ( - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-mezz_card_temp_(0x35)' - ) - - self.assertNotIn(resource_id, counters) - - def test_empty_payload_no_metrics_success(self): - processor = ipmi.TemperatureSensorNotification(None) - counters = dict([(counter.resource_id, counter) for counter in - processor.process_notification( - ipmi_test_data.EMPTY_PAYLOAD)]) - - self.assertEqual(0, len(counters), 'expected 0 readings') - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_data(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.MISSING_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - "missing 'Sensor Reading' in payload", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_sensor_data_malformed(self, mylog): - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - 
mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.BAD_SENSOR)) - - self.assertEqual( - 'invalid sensor data for ' - 'f4982fd2-2f2b-4bb5-9aff-48aac801d1ad-pci_riser_1_temp_(0x33): ' - 'unable to parse sensor reading: some bad stuff', - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_node_uuid(self, mylog): - """Test for desired error message when 'node_uuid' missing. - - Presumably this will never happen given the way the data - is created, but better defensive than dead. - """ - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_NODE_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'node_uuid'", - messages[0] - ) - - @mock.patch('ceilometer.ipmi.notifications.ironic.LOG') - def test_missing_sensor_id(self, mylog): - """Test for desired error message when 'Sensor ID' missing.""" - processor = ipmi.TemperatureSensorNotification(None) - - messages = [] - mylog.warning = lambda *args: messages.extend(args) - - list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID)) - - self.assertEqual( - 'invalid sensor data for missing id: missing key in payload: ' - "'Sensor ID'", - messages[0] - ) diff --git a/ceilometer/tests/unit/ipmi/platform/__init__.py b/ceilometer/tests/unit/ipmi/platform/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/platform/fake_utils.py b/ceilometer/tests/unit/ipmi/platform/fake_utils.py deleted file mode 100644 index a8bed725..00000000 --- a/ceilometer/tests/unit/ipmi/platform/fake_utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
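The resource ids asserted in the test_ironic tests above ('f4982fd2-...-dimm_gh_vr_temp_(0x3b)' and friends) imply a simple derivation: the node_uuid from the payload joined to the Sensor ID, lowercased with spaces turned into underscores, and a volume taken from the leading numeric chunk of the Sensor Reading. A sketch of that transformation, inferred from the assertions rather than from the removed ironic module (units are evidently normalized per sensor class, e.g. 'Volts' -> 'V', so they are left out here):

    def sensor_resource_id(node_uuid, sensor_id):
        # 'DIMM GH VR Temp (0x3b)' -> '<node_uuid>-dimm_gh_vr_temp_(0x3b)'
        return '%s-%s' % (node_uuid, sensor_id.lower().replace(' ', '_'))

    def parse_volume(sensor_reading):
        # '26 (+/- 0.500) degrees C' -> 26.0
        # 'Disabled' or 'some bad stuff' raises ValueError, which a caller
        # can catch to skip the reading and log a warning, matching the
        # disabled-reading and malformed-reading tests above.
        return float(sensor_reading.split()[0])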
- -import binascii - -from ceilometer.ipmi.platform import exception as nmexcept -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.unit.ipmi.platform import ipmitool_test_data as test_data - - -def get_sensor_status_init(parameter=''): - return (' 01\n', '') - - -def get_sensor_status_uninit(parameter=''): - return (' 00\n', '') - - -def init_sensor_agent(parameter=''): - return (' 00\n', '') - - -def get_nm_version_v2(parameter=''): - return test_data.nm_version_v2 - - -def get_nm_version_v3(parameter=''): - return test_data.nm_version_v3 - - -def sdr_dump(data_file=''): - if data_file == '': - raise ValueError("No file specified for ipmitool sdr dump") - fake_slave_address = '2c' - fake_channel = '60' - hexstr = node_manager.INTEL_PREFIX + fake_slave_address + fake_channel - data = binascii.unhexlify(hexstr) - with open(data_file, 'wb') as bin_fp: - bin_fp.write(data) - - return ('', '') - - -def _execute(funcs, *cmd, **kwargs): - - datas = { - test_data.device_id_cmd: test_data.device_id, - test_data.nm_device_id_cmd: test_data.nm_device_id, - test_data.get_power_cmd: test_data.power_data, - test_data.get_inlet_temp_cmd: test_data.inlet_temperature_data, - test_data.get_outlet_temp_cmd: test_data.outlet_temperature_data, - test_data.get_airflow_cmd: test_data.airflow_data, - test_data.get_cups_index_cmd: test_data.cups_index_data, - test_data.get_cups_util_cmd: test_data.cups_util_data, - test_data.sdr_info_cmd: test_data.sdr_info, - test_data.read_sensor_temperature_cmd: test_data.sensor_temperature, - test_data.read_sensor_voltage_cmd: test_data.sensor_voltage, - test_data.read_sensor_current_cmd: test_data.sensor_current, - test_data.read_sensor_fan_cmd: test_data.sensor_fan, - } - - if cmd[1] == 'sdr' and cmd[2] == 'dump': - # ipmitool sdr dump /tmp/XXXX - cmd_str = "".join(cmd[:3]) - par_str = cmd[3] - else: - cmd_str = "".join(cmd) - par_str = '' - - try: - return datas[cmd_str] - except KeyError: - return funcs[cmd_str](par_str) - - -def execute_with_nm_v3(*cmd, **kwargs): - """test version of execute on Node Manager V3.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v3} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_with_nm_v2(*cmd, **kwargs): - """test version of execute on Node Manager V2.0 platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_init, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump, - test_data.nm_version_cmd: get_nm_version_v2} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_nm(*cmd, **kwargs): - """test version of execute on Non-Node Manager platform.""" - - funcs = {test_data.sensor_status_cmd: get_sensor_status_uninit, - test_data.init_sensor_cmd: init_sensor_agent, - test_data.sdr_dump_cmd: sdr_dump} - - return _execute(funcs, *cmd, **kwargs) - - -def execute_without_ipmi(*cmd, **kwargs): - raise nmexcept.IPMIException diff --git a/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py b/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py deleted file mode 100644 index 7504aba3..00000000 --- a/ceilometer/tests/unit/ipmi/platform/ipmitool_test_data.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Sample data for test_intel_node_manager and test_ipmi_sensor. - -This data is provided as a sample of the data expected from the ipmitool -binary, which produces Node Manager/IPMI raw data. -""" - -sensor_temperature_data = """Sensor ID : SSB Therm Trip (0xd) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Temperature - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB P1 VR Temp (0x20) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 25 (+/- 0) degrees C - Status : ok - Nominal Reading : 58.000 - Normal Minimum : 10.000 - Normal Maximum : 105.000 - Upper critical : 115.000 - Upper non-critical : 110.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : Front Panel Temp (0x21) - Entity ID : 12.1 (Front Panel Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 23 (+/- 0) degrees C - Status : ok - Nominal Reading : 28.000 - Normal Minimum : 10.000 - Normal Maximum : 45.000 - Upper critical : 55.000 - Upper non-critical : 50.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : SSB Temp (0x22) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Temperature - Sensor Reading : 43 (+/- 0) degrees C - Status : ok - Nominal Reading : 52.000 - Normal Minimum : 10.000 - Normal Maximum : 93.000 - Upper critical : 103.000 - Upper non-critical : 98.000 - Lower critical : 0.000 - Lower non-critical : 5.000 - Positive Hysteresis : 2.000 - Negative Hysteresis : 2.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_voltage_data = """Sensor ID : VR Watchdog (0xb) - Entity ID : 7.1 (System Board) - Sensor Type (Discrete): Voltage - Assertions Enabled : Digital State - [State Asserted] - Deassertions Enabled : Digital State - [State Asserted] - -Sensor ID : BB +12.0V (0xd0) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 11.831 (+/- 0) Volts - Status : ok - Nominal Reading : 11.935 
- Normal Minimum : 11.363 - Normal Maximum : 12.559 - Upper critical : 13.391 - Upper non-critical : 13.027 - Lower critical : 10.635 - Lower non-critical : 10.947 - Positive Hysteresis : 0.052 - Negative Hysteresis : 0.052 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +1.35 P1LV AB (0xe4) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : Disabled - Status : Disabled - Nominal Reading : 1.342 - Normal Minimum : 1.275 - Normal Maximum : 1.409 - Upper critical : 1.488 - Upper non-critical : 1.445 - Lower critical : 1.201 - Lower non-critical : 1.244 - Positive Hysteresis : 0.006 - Negative Hysteresis : 0.006 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Event Status : Unavailable - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -Sensor ID : BB +5.0V (0xd1) - Entity ID : 7.1 (System Board) - Sensor Type (Analog) : Voltage - Sensor Reading : 4.959 (+/- 0) Volts - Status : ok - Nominal Reading : 4.981 - Normal Minimum : 4.742 - Normal Maximum : 5.241 - Upper critical : 5.566 - Upper non-critical : 5.415 - Lower critical : 4.416 - Lower non-critical : 4.546 - Positive Hysteresis : 0.022 - Negative Hysteresis : 0.022 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc unc ucr - Settable Thresholds : lcr lnc unc ucr - Threshold Read Mask : lcr lnc unc ucr - Assertion Events : - Assertions Enabled : lnc- lcr- unc+ ucr+ - Deassertions Enabled : lnc- lcr- unc+ ucr+ - -""" - -sensor_current_data = """Sensor ID : PS1 Curr Out % (0x58) - Entity ID : 10.1 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 11 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -Sensor ID : PS2 Curr Out % (0x59) - Entity ID : 10.2 (Power Supply) - Sensor Type (Analog) : Current - Sensor Reading : 0 (+/- 0) unspecified - Status : ok - Nominal Reading : 50.000 - Normal Minimum : 0.000 - Normal Maximum : 100.000 - Upper critical : 118.000 - Upper non-critical : 100.000 - Positive Hysteresis : Unspecified - Negative Hysteresis : Unspecified - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : unc ucr - Settable Thresholds : unc ucr - Threshold Read Mask : unc ucr - Assertion Events : - Assertions Enabled : unc+ ucr+ - Deassertions Enabled : unc+ ucr+ - -""" - -sensor_fan_data = """Sensor ID : System Fan 1 (0x30) - Entity ID : 29.1 (Fan Device) - Sensor Type (Analog) : Fan - Sensor 
Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 2 (0x32) - Entity ID : 29.2 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 3 (0x34) - Entity ID : 29.3 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4704 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -Sensor ID : System Fan 4 (0x36) - Entity ID : 29.4 (Fan Device) - Sensor Type (Analog) : Fan - Sensor Reading : 4606 (+/- 0) RPM - Status : ok - Nominal Reading : 7497.000 - Normal Minimum : 2499.000 - Normal Maximum : 12495.000 - Lower critical : 1715.000 - Lower non-critical : 1960.000 - Positive Hysteresis : 49.000 - Negative Hysteresis : 49.000 - Minimum sensor range : Unspecified - Maximum sensor range : Unspecified - Event Message Control : Per-threshold - Readable Thresholds : lcr lnc - Settable Thresholds : lcr lnc - Threshold Read Mask : lcr lnc - Assertion Events : - Assertions Enabled : lnc- lcr- - Deassertions Enabled : lnc- lcr- - -""" - - -sensor_status_cmd = 'ipmitoolraw0x0a0x2c0x00' -init_sensor_cmd = 'ipmitoolraw0x0a0x2c0x01' -sdr_dump_cmd = 'ipmitoolsdrdump' -sdr_info_cmd = 'ipmitoolsdrinfo' - -read_sensor_all_cmd = 'ipmitoolsdr-v' -read_sensor_temperature_cmd = 'ipmitoolsdr-vtypeTemperature' -read_sensor_voltage_cmd = 'ipmitoolsdr-vtypeVoltage' -read_sensor_current_cmd = 'ipmitoolsdr-vtypeCurrent' -read_sensor_fan_cmd = 'ipmitoolsdr-vtypeFan' - -device_id_cmd = 'ipmitoolraw0x060x01' -nm_device_id_cmd = 'ipmitool-b0x6-t0x2craw0x060x01' -nm_version_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xca0x570x010x00' -get_power_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x010x000x00' -get_inlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x020x000x00' -get_outlet_temp_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x050x000x00' -get_airflow_cmd = 'ipmitool-b0x6-t0x2craw0x2e0xc80x570x010x000x040x000x00' -get_cups_index_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x01' -get_cups_util_cmd = 'ipmitool-b0x6-t0x2craw0x2e0x650x570x010x000x05' - - 
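For orientation when reading the *_cmd constants above: the fake _execute in fake_utils dispatches on "".join(cmd), so each key is simply an ipmitool argument vector with the separators dropped. For example:

    cmd = ('ipmitool', 'raw', '0x0a', '0x2c', '0x00')
    assert ''.join(cmd) == sensor_status_cmd  # 'ipmitoolraw0x0a0x2c0x00'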
-device_id = (' 21 01 01 04 02 bf 57 01 00 49 00 01 07 50 0b', '') -nm_device_id = (' 50 01 02 15 02 21 57 01 00 02 0b 02 09 10 01', '') - -nm_version_v2 = (' 57 01 00 03 02 00 02 15', '') -nm_version_v3 = (' 57 01 00 05 03 00 03 06', '') - -# start from byte 3, get cur- 57 00(87), min- 03 00(3) -# max- 37 02(567), avg- 5c 00(92) -power_data = (' 57 01 00 57 00 03 00 37 02 5c 00 cc 37 f4 53 ce\n' - ' 9b 12 01 50\n', '') - -# start from byte 3, get cur- 17 00(23), min- 16 00(22) -# max- 18 00(24), avg- 17 00(23) -inlet_temperature_data = (' 57 01 00 17 00 16 00 18 00 17 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- 19 00(25), min- 18 00(24) -# max- 1b 00(27), avg- 19 00(25) -outlet_temperature_data = (' 57 01 00 19 00 18 00 1b 00 19 00 f3 6f fe 53 85\n' - ' b7 02 00 50\n', '') - -# start from byte 3, get cur- be 00(190), min- 96 00(150) -# max- 26 02(550), avg- cb 00(203) -airflow_data = (' 57 01 00 be 00 96 00 26 02 cb 00 e1 65 c1 54 db\n' - ' b7 02 00 50\n', '') - -# start from byte 3, cups index 2e 00 (46) -cups_index_data = (' 57 01 00 2e 00\n', '') - -# start from byte 3, get cpu_util - 33 00 ...(51), mem_util - 05 00 ...(5) -# io_util - 00 00 ...(0) -cups_util_data = (' 57 01 00 33 00 00 00 00 00 00 00 05 00 00 00 00\n' - ' 00 00 00 00 00 00 00 00 00 00 00\n', '') - -sdr_info = ('', '') - -sensor_temperature = (sensor_temperature_data, '') -sensor_voltage = (sensor_voltage_data, '') -sensor_current = (sensor_current_data, '') -sensor_fan = (sensor_fan_data, '') diff --git a/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py b/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py deleted file mode 100644 index 0383958c..00000000 --- a/ceilometer/tests/unit/ipmi/platform/test_intel_node_manager.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
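The '# start from byte 3' annotations above decode each value as a little-endian byte pair, which is also what the node_manager._hex calls in the tests below must do to recover the annotated numbers. An illustrative reconstruction of that decoding rule (an assumption drawn from the annotated data, not the removed implementation):

    def _hex(pair):
        # little-endian byte pair: '57 00' -> 0x0057 == 87
        return int(''.join(reversed(pair.split())), 16)

    assert _hex('57 00') == 87    # current power
    assert _hex('37 02') == 567   # maximum power
    assert _hex('5c 00') == 92    # average power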
- -import abc -import tempfile - -import mock -from oslotest import base -import six - -from ceilometer.ipmi.platform import intel_node_manager as node_manager -from ceilometer.tests.unit.ipmi.platform import fake_utils -from ceilometer import utils - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractmethod - def init_test_engine(self): - """Prepare specific ipmitool as engine for different NM version.""" - - def setUp(self): - super(_Base, self).setUp() - self.init_test_engine() - self.nm = node_manager.NodeManager() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - node_manager.NodeManager()._inited = False - super(_Base, cls).tearDownClass() - - -class TestNodeManagerV3(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v3) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - avg_val = node_manager._hex(airflow["Average_value"]) - max_val = node_manager._hex(airflow["Maximum_value"]) - min_val = node_manager._hex(airflow["Minimum_value"]) - cur_val = node_manager._hex(airflow["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(190, cur_val) - self.assertEqual(150, min_val) - self.assertEqual(550, max_val) - self.assertEqual(203, avg_val) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # get NM 3.0 - self.assertEqual(5, self.nm.nm_version) - - # see ipmi_test_data.py for raw data - self.assertEqual(25, cur_val) - self.assertEqual(24, min_val) - self.assertEqual(27, max_val) - self.assertEqual(25, avg_val) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - cpu_util = node_manager._hex(cups_util["CPU_Utilization"]) - mem_util = node_manager._hex(cups_util["Mem_Utilization"]) - io_util = node_manager._hex(cups_util["IO_Utilization"]) - - # see ipmi_test_data.py for raw data - self.assertEqual(51, cpu_util) - self.assertEqual(5, mem_util) - self.assertEqual(0, io_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - index = node_manager._hex(cups_index["CUPS_Index"]) - self.assertEqual(46, index) - - -class TestNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - - def test_read_power_all(self): - power = self.nm.read_power_all() - - avg_val = node_manager._hex(power["Average_value"]) - max_val = node_manager._hex(power["Maximum_value"]) - min_val = node_manager._hex(power["Minimum_value"]) - cur_val = node_manager._hex(power["Current_value"]) - - # get NM 2.0 - self.assertEqual(3, self.nm.nm_version) - # see ipmi_test_data.py for raw data - self.assertEqual(87, cur_val) - self.assertEqual(3, min_val) - self.assertEqual(567, max_val) - self.assertEqual(92, avg_val) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - avg_val = node_manager._hex(temperature["Average_value"]) - max_val = node_manager._hex(temperature["Maximum_value"]) - min_val = node_manager._hex(temperature["Minimum_value"]) - cur_val = node_manager._hex(temperature["Current_value"]) - - # see 
ipmi_test_data.py for raw data - self.assertEqual(23, cur_val) - self.assertEqual(22, min_val) - self.assertEqual(24, max_val) - self.assertEqual(23, avg_val) - - def test_read_airflow(self): - airflow = self.nm.read_airflow() - self.assertEqual({}, airflow) - - def test_read_outlet_temperature(self): - temperature = self.nm.read_outlet_temperature() - self.assertEqual({}, temperature) - - def test_read_cups_utilization(self): - cups_util = self.nm.read_cups_utilization() - self.assertEqual({}, cups_util) - - def test_read_cups_index(self): - cups_index = self.nm.read_cups_index() - self.assertEqual({}, cups_index) - - -class TestNonNodeManager(_Base): - - def init_test_engine(self): - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_nm) - - def test_read_power_all(self): - # no NM support - self.assertEqual(0, self.nm.nm_version) - power = self.nm.read_power_all() - - # Non-Node Manager platform returns empty data - self.assertEqual({}, power) - - def test_read_inlet_temperature(self): - temperature = self.nm.read_inlet_temperature() - - # Non-Node Manager platform returns empty data - self.assertEqual({}, temperature) - - -class ParseSDRFileTestCase(base.BaseTestCase): - - def setUp(self): - super(ParseSDRFileTestCase, self).setUp() - self.temp_file = tempfile.NamedTemporaryFile().name - - def test_parsing_found(self): - data = b'\x00\xFF\x00\xFF\x57\x01\x00\x0D\x01\x0A\xB2\x00\xFF' - with open(self.temp_file, 'wb') as f: - f.write(data) - result = node_manager.NodeManager._parse_slave_and_channel( - self.temp_file) - self.assertEqual(('0a', 'b'), result) - - def test_parsing_not_found(self): - data = b'\x00\xFF\x00\xFF\x52\x01\x80\x0D\x01\x6A\xB7\x00\xFF' - with open(self.temp_file, 'wb') as f: - f.write(data) - result = node_manager.NodeManager._parse_slave_and_channel( - self.temp_file) - self.assertIsNone(result) diff --git a/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py b/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py deleted file mode 100644 index e6eaddea..00000000 --- a/ceilometer/tests/unit/ipmi/platform/test_ipmi_sensor.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base - -from ceilometer.ipmi.platform import ipmi_sensor -from ceilometer.tests.unit.ipmi.platform import fake_utils -from ceilometer import utils - - -class TestIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_with_nm_v2) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertTrue(self.ipmi.ipmi_support) - # only temperature data returned. 
- self.assertIn('Temperature', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Temperature'])) - sensor = sensors['Temperature']['BB P1 VR Temp (0x20)'] - self.assertEqual('25 (+/- 0) degrees C', sensor['Sensor Reading']) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # only voltage data returned. - self.assertIn('Voltage', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total, ignore 1 without 'Sensor Reading'. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(3, len(sensors['Voltage'])) - sensor = sensors['Voltage']['BB +5.0V (0xd1)'] - self.assertEqual('4.959 (+/- 0) Volts', sensor['Sensor Reading']) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # only Current data returned. - self.assertIn('Current', sensors) - self.assertEqual(1, len(sensors)) - - # 2 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(2, len(sensors['Current'])) - sensor = sensors['Current']['PS1 Curr Out % (0x58)'] - self.assertEqual('11 (+/- 0) unspecified', sensor['Sensor Reading']) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # only Fan data returned. - self.assertIn('Fan', sensors) - self.assertEqual(1, len(sensors)) - - # 4 sensor data in total. - # Check ceilometer/tests/ipmi/platform/ipmi_test_data.py - self.assertEqual(4, len(sensors['Fan'])) - sensor = sensors['Fan']['System Fan 2 (0x32)'] - self.assertEqual('4704 (+/- 0) RPM', sensor['Sensor Reading']) - - -class TestNonIPMISensor(base.BaseTestCase): - - def setUp(self): - super(TestNonIPMISensor, self).setUp() - - utils.execute = mock.Mock(side_effect=fake_utils.execute_without_ipmi) - self.ipmi = ipmi_sensor.IPMISensor() - - @classmethod - def tearDownClass(cls): - # reset inited to force an initialization of singleton for next test - ipmi_sensor.IPMISensor()._inited = False - super(TestNonIPMISensor, cls).tearDownClass() - - def test_read_sensor_temperature(self): - sensors = self.ipmi.read_sensor_any('Temperature') - - self.assertFalse(self.ipmi.ipmi_support) - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_voltage(self): - sensors = self.ipmi.read_sensor_any('Voltage') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_current(self): - sensors = self.ipmi.read_sensor_any('Current') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) - - def test_read_sensor_fan(self): - sensors = self.ipmi.read_sensor_any('Fan') - - # Non-IPMI platform returns empty data - self.assertEqual({}, sensors) diff --git a/ceilometer/tests/unit/ipmi/pollsters/__init__.py b/ceilometer/tests/unit/ipmi/pollsters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/ipmi/pollsters/base.py b/ceilometer/tests/unit/ipmi/pollsters/base.py deleted file mode 100644 index 6b8023d4..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/base.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2014 Intel -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import mock -from oslotest import mockpatch -import six - -from ceilometer.agent import manager -from ceilometer.tests import base - - -@six.add_metaclass(abc.ABCMeta) -class TestPollsterBase(base.BaseTestCase): - - def fake_data(self): - """Fake data used for test.""" - return None - - def fake_sensor_data(self, sensor_type): - """Fake sensor data used for test.""" - return None - - @abc.abstractmethod - def make_pollster(self): - """Produce the right pollster for the test.""" - - def _test_get_samples(self): - nm = mock.Mock() - nm.read_inlet_temperature.side_effect = self.fake_data - nm.read_outlet_temperature.side_effect = self.fake_data - nm.read_power_all.side_effect = self.fake_data - nm.read_airflow.side_effect = self.fake_data - nm.read_cups_index.side_effect = self.fake_data - nm.read_cups_utilization.side_effect = self.fake_data - nm.read_sensor_any.side_effect = self.fake_sensor_data - # We should mock the pollster first before initializing the Manager - # so that we don't trigger the sudo in pollsters' __init__(). - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.intel_node_manager.NodeManager', - return_value=nm)) - - self.useFixture(mockpatch.Patch( - 'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor', - return_value=nm)) - - self.mgr = manager.AgentManager(['ipmi']) - - self.pollster = self.make_pollster() - - def _verify_metering(self, length, expected_vol=None, node=None): - cache = {} - resources = ['local_host'] - - samples = list(self.pollster.get_samples(self.mgr, cache, resources)) - self.assertEqual(length, len(samples)) - - if expected_vol: - self.assertTrue(any(s.volume == expected_vol for s in samples)) - if node: - self.assertTrue(any(s.resource_metadata['node'] == node - for s in samples)) diff --git a/ceilometer/tests/unit/ipmi/pollsters/test_node.py b/ceilometer/tests/unit/ipmi/pollsters/test_node.py deleted file mode 100644 index 4b3e7c6a..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/test_node.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import node -from ceilometer.tests.unit.ipmi.pollsters import base - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - - -class TestPowerPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['13', '00']} - - def make_pollster(self): - return node.PowerPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 19(0x13 as current_value) - self._verify_metering(1, 19, CONF.host) - - -class TestInletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['23', '00']} - - def make_pollster(self): - return node.InletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 35(0x23 as current_value) - self._verify_metering(1, 35, CONF.host) - - -class TestOutletTemperaturePollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['25', '00']} - - def make_pollster(self): - return node.OutletTemperaturePollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 37(0x25 as current_value) - self._verify_metering(1, 37, CONF.host) - - -class TestAirflowPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Current_value": ['be', '00']} - - def make_pollster(self): - return node.AirflowPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 190(0xbe as current_value) - self._verify_metering(1, 190, CONF.host) - - -class TestCUPSIndexPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CUPS_Index": ['2e', '00']} - - def make_pollster(self): - return node.CUPSIndexPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 46(0x2e as CUPS_Index) - self._verify_metering(1, 46, CONF.host) - - -class CPUUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"CPU_Utilization": - ['33', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.CPUUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 51(0x33 as CPU_Utilization) - self._verify_metering(1, 51, CONF.host) - - -class MemUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"Mem_Utilization": - ['05', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.MemUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 5(0x05) - self._verify_metering(1, 5, CONF.host) - - -class 
IOUtilPollster(base.TestPollsterBase): - - def fake_data(self): - # data after parsing Intel Node Manager output - return {"IO_Utilization": - ['00', '00', '00', '00', '00', '00', '00', '00']} - - def make_pollster(self): - return node.IOUtilPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - # only one sample, and value is 0(0x00) - self._verify_metering(1, 0, CONF.host) diff --git a/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py b/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py deleted file mode 100644 index 96f5a3f4..00000000 --- a/ceilometer/tests/unit/ipmi/pollsters/test_sensor.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright 2014 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import cfg - -from ceilometer.ipmi.pollsters import sensor -from ceilometer.tests.unit.ipmi.notifications import ipmi_test_data -from ceilometer.tests.unit.ipmi.pollsters import base - -CONF = cfg.CONF -CONF.import_opt('host', 'ceilometer.service') - -TEMPERATURE_SENSOR_DATA = { - 'Temperature': ipmi_test_data.TEMPERATURE_DATA -} - -CURRENT_SENSOR_DATA = { - 'Current': ipmi_test_data.CURRENT_DATA -} - -FAN_SENSOR_DATA = { - 'Fan': ipmi_test_data.FAN_DATA -} - -VOLTAGE_SENSOR_DATA = { - 'Voltage': ipmi_test_data.VOLTAGE_DATA -} - -MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload'] -MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload'] -MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload'] - - -class TestTemperatureSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return TEMPERATURE_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(10, float(32), CONF.host) - - -class TestMissingSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMalformedSensorData(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MALFORMED_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - self._verify_metering(0) - - -class TestMissingSensorId(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return MISSING_ID_SENSOR_DATA - - def make_pollster(self): - return sensor.TemperatureSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - 
self._test_get_samples() - self._verify_metering(0) - - -class TestFanSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return FAN_SENSOR_DATA - - def make_pollster(self): - return sensor.FanSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(12, float(7140), CONF.host) - - -class TestCurrentSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return CURRENT_SENSOR_DATA - - def make_pollster(self): - return sensor.CurrentSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(1, float(130), CONF.host) - - -class TestVoltageSensorPollster(base.TestPollsterBase): - - def fake_sensor_data(self, sensor_type): - return VOLTAGE_SENSOR_DATA - - def make_pollster(self): - return sensor.VoltageSensorPollster() - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def test_get_samples(self): - self._test_get_samples() - - self._verify_metering(4, float(3.309), CONF.host) diff --git a/ceilometer/tests/unit/meter/test_meter_plugins.py b/ceilometer/tests/unit/meter/test_meter_plugins.py deleted file mode 100644 index 4d8d04d9..00000000 --- a/ceilometer/tests/unit/meter/test_meter_plugins.py +++ /dev/null @@ -1,71 +0,0 @@ -# -# Copyright 2016 Mirantis Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
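The volumes asserted above (7140.0 RPM, 130.0, 3.309 Volts) are the numeric leads of the ipmitool 'Sensor Reading' strings defined in ipmi_test_data.py. A hedged sketch of how such a string maps to a sample volume; the real extraction lives in ceilometer/ipmi/pollsters/sensor.py, and the helper name here is invented for illustration:

    def reading_to_volume(sensor_reading):
        # '4704 (+/- 0) RPM' -> 4704.0: the value is the first token
        return float(sensor_reading.split(' ', 1)[0])

    assert reading_to_volume('4704 (+/- 0) RPM') == 4704.0
    assert reading_to_volume('4.959 (+/- 0) Volts') == 4.959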
-import mock -from oslotest import base - -from ceilometer.event import trait_plugins - - -class TestTimedeltaPlugin(base.BaseTestCase): - - def setUp(self): - super(TestTimedeltaPlugin, self).setUp() - self.plugin = trait_plugins.TimedeltaPlugin() - - def test_timedelta_transformation(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T16:04:32')] - value = self.plugin.trait_value(match_list) - self.assertEqual(3600, value) - - def test_timedelta_missing_field(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - log.warning.assert_called_once_with( - 'Timedelta plugin is required two timestamp fields to create ' - 'timedelta value.') - - def test_timedelta_exceed_field(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T16:04:32'), - ('test.timestamp3', '2016-03-02T16:10:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - log.warning.assert_called_once_with( - 'Timedelta plugin is required two timestamp fields to create ' - 'timedelta value.') - - def test_timedelta_invalid_timestamp(self): - match_list = [('test.timestamp1', '2016-03-02T15:04:32'), - ('test.timestamp2', '2016-03-02T15:004:32')] - with mock.patch('%s.LOG' % self.plugin.trait_value.__module__) as log: - self.assertIsNone(self.plugin.trait_value(match_list)) - msg = log.warning._mock_call_args[0][0] - self.assertTrue(msg.startswith('Failed to parse date from set ' - 'fields, both fields ') - ) - - def test_timedelta_reverse_timestamp_order(self): - match_list = [('test.timestamp1', '2016-03-02T15:15:32'), - ('test.timestamp2', '2016-03-02T15:10:32')] - value = self.plugin.trait_value(match_list) - self.assertEqual(300, value) - - def test_timedelta_precise_difference(self): - match_list = [('test.timestamp1', '2016-03-02T15:10:32.786893'), - ('test.timestamp2', '2016-03-02T15:10:32.786899')] - value = self.plugin.trait_value(match_list) - self.assertEqual(0.000006, value) diff --git a/ceilometer/tests/unit/meter/test_notifications.py b/ceilometer/tests/unit/meter/test_notifications.py deleted file mode 100644 index 7da56432..00000000 --- a/ceilometer/tests/unit/meter/test_notifications.py +++ /dev/null @@ -1,714 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
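The cases above pin down the TimedeltaPlugin contract: exactly two timestamp fields, order-insensitive, a result in seconds with microsecond precision, and None plus a logged warning on bad input. A minimal sketch of that behaviour, assumed rather than copied from ceilometer/event/trait_plugins.py:

    from oslo_utils import timeutils

    def trait_value(match_list):
        if len(match_list) != 2:
            return None  # the shipped plugin also logs a warning here
        try:
            (_, start), (_, end) = match_list
            delta = (timeutils.parse_isotime(end) -
                     timeutils.parse_isotime(start))
            return abs(delta.total_seconds())
        except ValueError:  # unparsable timestamp
            return None

    assert trait_value([('t1', '2016-03-02T15:04:32'),
                        ('t2', '2016-03-02T16:04:32')]) == 3600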
-"""Tests for ceilometer.meter.notifications -""" -import copy -import mock -import os -import six -import yaml - -from oslo_config import fixture as fixture_config -from oslo_utils import encodeutils -from oslo_utils import fileutils - -import ceilometer -from ceilometer import declarative -from ceilometer.meter import notifications -from ceilometer import service as ceilometer_service -from ceilometer.tests import base as test - -NOTIFICATION = { - 'event_type': u'test.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': {u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'bea70e51c7340cb9d555b15cbfcaec23', - u'timestamp': u'2015-06-19T09:19:35.785330', - u'created_at': u'2015-06-19T09:25:35.785330', - u'launched_at': u'2015-06-19T09:25:40.785330', - u'message_signature': u'fake_signature1', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - }, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e', - 'publisher_id': "foo123" -} - -MIDDLEWARE_EVENT = { - u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', - u'_context_quota_class': None, - u'event_type': u'objectstore.http.request', - u'_context_service_catalog': [], - u'_context_auth_token': None, - u'_context_user_id': None, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_user': None, - u'publisher_id': u'ceilometermiddleware', - u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', - u'_context_remote_address': None, - u'_context_roles': [], - u'timestamp': u'2013-07-29 06:51:34.474815', - u'_context_timestamp': u'2013-07-29T06:51:34.348091', - u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', - u'_context_project_name': None, - u'_context_read_deleted': u'no', - u'_context_tenant': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': None, - u'_context_user_name': None, - u'payload': { - 'typeURI': 'http: //schemas.dmtf.org/cloud/audit/1.0/event', - 'eventTime': '2015-01-30T16: 38: 43.233621', - 'target': { - 'action': 'get', - 'typeURI': 'service/storage/object', - 'id': 'account', - 'metadata': { - 'path': '/1.0/CUSTOM_account/container/obj', - 'version': '1.0', - 'container': 'container', - 'object': 'obj' - } - }, - 'observer': { - 'id': 'target' - }, - 'eventType': 'activity', - 'measurements': [ - { - 'metric': { - 'metricId': 'openstack: uuid', - 'name': 'storage.objects.outgoing.bytes', - 'unit': 'B' - }, - 'result': 28 - }, - { - 'metric': { - 'metricId': 'openstack: uuid2', - 'name': 'storage.objects.incoming.bytes', - 'unit': 'B' - }, - 'result': 1 - } - ], - 'initiator': { - 'typeURI': 'service/security/account/user', - 'project_id': None, - 'id': 'openstack: 288f6260-bf37-4737-a178-5038c84ba244' - }, - 'action': 'read', - 'outcome': 'success', - 'id': 'openstack: 69972bb6-14dd-46e4-bdaf-3148014363dc' - } -} - -FULL_MULTI_MSG = { - u'_context_domain': None, - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - 'event_type': u'full.sample', - 'timestamp': u'2015-06-1909: 19: 35.786893', - u'_context_auth_token': None, - u'_context_read_only': False, - 'payload': [{ - u'counter_name': u'instance1', - u'user_id': u'user1', - u'resource_id': u'res1', - u'counter_unit': u'ns', - u'counter_volume': 28.0, - 
u'project_id': u'proj1', - u'counter_type': u'gauge' - }, - { - u'counter_name': u'instance2', - u'user_id': u'user2', - u'resource_id': u'res2', - u'counter_unit': u'%', - u'counter_volume': 1.0, - u'project_id': u'proj2', - u'counter_type': u'delta' - }], - u'_context_resource_uuid': None, - u'_context_user_identity': u'fake_user_identity---', - u'_context_show_deleted': False, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - 'priority': 'info', - u'_context_is_admin': True, - u'_context_project_domain': None, - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'_context_user_domain': None, - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' -} - -METRICS_UPDATE = { - u'_context_request_id': u'req-a8bfa89b-d28b-4b95-9e4b-7d7875275650', - u'_context_quota_class': None, - u'event_type': u'compute.metrics.update', - u'_context_service_catalog': [], - u'_context_auth_token': None, - u'_context_user_id': None, - u'payload': { - u'metrics': [ - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.frequency', 'value': 1600, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.time', 'value': 17421440000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.time', 'value': 7852600000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.time', 'value': 1307374400000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.time', 'value': 11697470000000, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.user.percent', 'value': 0.012959045637294348, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.kernel.percent', 'value': 0.005841204961898534, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.idle.percent', 'value': 0.9724985141658965, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.iowait.percent', 'value': 0.008701235234910634, - 'source': 'libvirt.LibvirtDriver'}, - {'timestamp': u'2013-07-29T06:51:34.472416', - 'name': 'cpu.percent', 'value': 0.027501485834103515, - 'source': 'libvirt.LibvirtDriver'}], - u'nodename': u'tianst.sh.intel.com', - u'host': u'tianst', - u'host_id': u'10.0.1.1'}, - u'priority': u'INFO', - u'_context_is_admin': True, - u'_context_user': None, - u'publisher_id': u'compute.tianst.sh.intel.com', - u'message_id': u'6eccedba-120e-4db8-9735-2ad5f061e5ee', - u'_context_remote_address': None, - u'_context_roles': [], - u'timestamp': u'2013-07-29 06:51:34.474815', - u'_context_timestamp': u'2013-07-29T06:51:34.348091', - u'_unique_id': u'0ee26117077648e18d88ac76e28a72e2', - u'_context_project_name': None, - u'_context_read_deleted': u'no', - u'_context_tenant': None, - u'_context_instance_lock_checked': False, - u'_context_project_id': None, - u'_context_user_name': None -} - - -class TestMeterDefinition(test.BaseTestCase): - - def test_config_definition(self): - cfg = dict(name="test", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id") - handler = notifications.MeterDefinition(cfg, mock.Mock()) - self.assertTrue(handler.match_type("test.create")) - sample = 
list(handler.to_samples(NOTIFICATION))[0] - self.assertEqual(1.0, sample["volume"]) - self.assertEqual("bea70e51c7340cb9d555b15cbfcaec23", - sample["resource_id"]) - self.assertEqual("30be1fc9a03c4e94ab05c403a8a377f2", - sample["project_id"]) - - def test_config_required_missing_fields(self): - cfg = dict() - try: - notifications.MeterDefinition(cfg, mock.Mock()) - except declarative.DefinitionException as e: - self.assertIn("Required fields ['name', 'type', 'event_type'," - " 'unit', 'volume', 'resource_id']" - " not specified", - encodeutils.exception_to_unicode(e)) - - def test_bad_type_cfg_definition(self): - cfg = dict(name="test", type="foo", event_type="bar.create", - unit="foo", volume="bar", - resource_id="bea70e51c7340cb9d555b15cbfcaec23") - try: - notifications.MeterDefinition(cfg, mock.Mock()) - except declarative.DefinitionException as e: - self.assertIn("Invalid type foo specified", - encodeutils.exception_to_unicode(e)) - - -class TestMeterProcessing(test.BaseTestCase): - - def setUp(self): - super(TestMeterProcessing, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.handler = notifications.ProcessMeterNotifications(mock.Mock()) - - def test_fallback_meter_path(self): - self.CONF.set_override('meter_definitions_cfg_file', - '/not/existing/path', group='meter') - with mock.patch('ceilometer.declarative.open', - mock.mock_open(read_data='---\nmetric: []'), - create=True) as mock_open: - self.handler._load_definitions() - if six.PY3: - path = os.path.dirname(ceilometer.__file__) - else: - path = "ceilometer" - mock_open.assert_called_with(path + "/meter/data/meters.yaml") - - def _load_meter_def_file(self, cfg): - if six.PY3: - cfg = cfg.encode('utf-8') - meter_cfg_file = fileutils.write_to_tempfile(content=cfg, - prefix="meters", - suffix="yaml") - self.CONF.set_override('meter_definitions_cfg_file', - meter_cfg_file, group='meter') - self.handler.definitions = self.handler._load_definitions() - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_bad_meter_definition_skip(self, LOG): - cfg = yaml.dump( - {'metric': [dict(name="good_test_1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="bad_test_2", type="bad_type", - event_type="bar.create", - unit="foo", volume="bar", - resource_id="bea70e51c7340cb9d555b15cbfcaec23"), - dict(name="good_test_3", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - self.assertEqual(2, len(self.handler.definitions)) - args, kwargs = LOG.error.call_args_list[0] - self.assertEqual("Error loading meter definition: %s", args[0]) - self.assertTrue(args[1].endswith("Invalid type bad_type specified")) - - def test_jsonpath_values_parsed(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('test1', s1['name']) - self.assertEqual(1.0, s1['volume']) - self.assertEqual('bea70e51c7340cb9d555b15cbfcaec23', s1['resource_id']) - 
self.assertEqual('30be1fc9a03c4e94ab05c403a8a377f2', s1['project_id']) - - def test_multiple_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test2", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - data = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(2, len(data)) - expected_names = ['test1', 'test2'] - for s in data: - self.assertIn(s.as_dict()['name'], expected_names) - - def test_unmatched_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.update", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(0, len(c)) - - def test_regex_match_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - - def test_default_timestamp(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - multi="name")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(MIDDLEWARE_EVENT['timestamp'], s1['timestamp']) - - def test_custom_timestamp(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - multi="name", - timestamp='$.payload.eventTime')]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(MIDDLEWARE_EVENT['payload']['eventTime'], - s1['timestamp']) - - def test_custom_timestamp_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="'prefix-' + $.payload.nodename", - timestamp="$.payload.metrics" - "[?(@.name='cpu.frequency')].timestamp")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual("2013-07-29T06:51:34.472416", s1['timestamp']) - - def 
test_default_metadata(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - meta = NOTIFICATION['payload'].copy() - meta['host'] = NOTIFICATION['publisher_id'] - meta['event_type'] = NOTIFICATION['event_type'] - self.assertEqual(meta, s1['resource_metadata']) - - def test_datetime_plugin(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="gauge", - unit="sec", - volume={"fields": ["$.payload.created_at", - "$.payload.launched_at"], - "plugin": "timedelta"}, - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual(5.0, s1['volume']) - - def test_custom_metadata(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.*", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id", - metadata={'proj': '$.payload.project_id', - 'dict': '$.payload.resource_metadata'})]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - meta = {'proj': s1['project_id'], - 'dict': NOTIFICATION['payload']['resource_metadata']} - self.assertEqual(meta, s1['resource_metadata']) - - def test_multi_match_event_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test2", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(2, len(c)) - - def test_multi_meter_payload(self): - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "volume", "unit"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(MIDDLEWARE_EVENT)) - self.assertEqual(2, len(c)) - s1 = c[0].as_dict() - self.assertEqual('storage.objects.outgoing.bytes', s1['name']) - self.assertEqual(28, s1['volume']) - self.assertEqual('B', s1['unit']) - s2 = c[1].as_dict() - self.assertEqual('storage.objects.incoming.bytes', s2['name']) - self.assertEqual(1, s2['volume']) - self.assertEqual('B', s2['unit']) - - def test_multi_meter_payload_single(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][1] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - 
project_id="$.payload.initiator.project_id", - lookup=["name", "unit"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('storage.objects.outgoing.bytes', s1['name']) - self.assertEqual(28, s1['volume']) - self.assertEqual('B', s1['unit']) - - def test_multi_meter_payload_none(self): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup="name")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(0, len(c)) - - def test_multi_meter_payload_all_multi(self): - cfg = yaml.dump( - {'metric': [dict(name="$.payload.[*].counter_name", - event_type="full.sample", - type="$.payload.[*].counter_type", - unit="$.payload.[*].counter_unit", - volume="$.payload.[*].counter_volume", - resource_id="$.payload.[*].resource_id", - project_id="$.payload.[*].project_id", - user_id="$.payload.[*].user_id", - lookup=['name', 'type', 'unit', 'volume', - 'resource_id', 'project_id', 'user_id'])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(FULL_MULTI_MSG)) - self.assertEqual(2, len(c)) - msg = FULL_MULTI_MSG['payload'] - for idx, val in enumerate(c): - s1 = val.as_dict() - self.assertEqual(msg[idx]['counter_name'], s1['name']) - self.assertEqual(msg[idx]['counter_volume'], s1['volume']) - self.assertEqual(msg[idx]['counter_unit'], s1['unit']) - self.assertEqual(msg[idx]['counter_type'], s1['type']) - self.assertEqual(msg[idx]['resource_id'], s1['resource_id']) - self.assertEqual(msg[idx]['project_id'], s1['project_id']) - self.assertEqual(msg[idx]['user_id'], s1['user_id']) - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_multi_meter_payload_invalid_missing(self, LOG): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][0]['result'] - del event['payload']['measurements'][1]['result'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "unit", "volume"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - self.assertEqual(0, len(c)) - LOG.warning.assert_called_with('Only 0 fetched meters contain ' - '"volume" field instead of 2.') - - @mock.patch('ceilometer.meter.notifications.LOG') - def test_multi_meter_payload_invalid_short(self, LOG): - event = copy.deepcopy(MIDDLEWARE_EVENT) - del event['payload']['measurements'][0]['result'] - cfg = yaml.dump( - {'metric': [dict(name="$.payload.measurements.[*].metric.[*].name", - event_type="objectstore.http.request", - type="delta", - unit="$.payload.measurements.[*].metric.[*].unit", - volume="$.payload.measurements.[*].result", - resource_id="$.payload.target_id", - project_id="$.payload.initiator.project_id", - lookup=["name", "unit", "volume"])]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(event)) - 
self.assertEqual(0, len(c)) - LOG.warning.assert_called_with('Only 1 fetched meters contain ' - '"volume" field instead of 2.') - - def test_arithmetic_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.percent', - event_type="compute.metrics.update", - type='gauge', - unit="percent", - volume="$.payload.metrics[" - "?(@.name='cpu.percent')].value" - " * 100", - resource_id="$.payload.host + '_'" - " + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.percent', s1['name']) - self.assertEqual(2.7501485834103514, s1['volume']) - self.assertEqual("tianst_tianst.sh.intel.com", - s1['resource_id']) - - def test_string_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="$.payload.host + '_'" - " + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual(1600, s1['volume']) - self.assertEqual("tianst_tianst.sh.intel.com", - s1['resource_id']) - - def test_prefix_expr_meter(self): - cfg = yaml.dump( - {'metric': [dict(name='compute.node.cpu.frequency', - event_type="compute.metrics.update", - type='gauge', - unit="ns", - volume="$.payload.metrics[?(@.name='cpu.frequency')]" - ".value", - resource_id="'prefix-' + $.payload.nodename")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(METRICS_UPDATE)) - self.assertEqual(1, len(c)) - s1 = c[0].as_dict() - self.assertEqual('compute.node.cpu.frequency', s1['name']) - self.assertEqual(1600, s1['volume']) - self.assertEqual("prefix-tianst.sh.intel.com", - s1['resource_id']) - - def test_duplicate_meter(self): - cfg = yaml.dump( - {'metric': [dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id"), - dict(name="test1", - event_type="test.create", - type="delta", - unit="B", - volume="$.payload.volume", - resource_id="$.payload.resource_id", - project_id="$.payload.project_id")]}) - self._load_meter_def_file(cfg) - c = list(self.handler.process_notification(NOTIFICATION)) - self.assertEqual(1, len(c)) diff --git a/ceilometer/tests/unit/network/__init__.py b/ceilometer/tests/unit/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/services/__init__.py b/ceilometer/tests/unit/network/services/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/services/test_fwaas.py b/ceilometer/tests/unit/network/services/test_fwaas.py deleted file mode 100644 index bf300303..00000000 --- a/ceilometer/tests/unit/network/services/test_fwaas.py +++ /dev/null @@ -1,169 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import fwaas - - -class _BaseTestFWPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestFWPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. - return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestFirewallPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestFirewallPollster, self).setUp() - self.pollster = fwaas.FirewallPollster() - fake_fw = self.fake_get_fw_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'firewall_get_all', - return_value=fake_fw)) - - @staticmethod - def fake_get_fw_service(): - return [{'status': 'ACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'INACTIVE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'PENDING_CREATE', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - {'status': 'error', - 'name': 'myfw', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'firewall_policy_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa'}, - ] - - def test_fw_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_service())) - self.assertEqual(set(['network.services.firewall']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_fws = discovery.FirewallDiscovery().discover(self.manager) - self.assertEqual(3, len(discovered_fws)) - - for vpn in self.fake_get_fw_service(): - if 
vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_fws) - else: - self.assertIn(vpn, discovered_fws) - - -class TestIPSecConnectionsPollster(_BaseTestFWPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = fwaas.FirewallPolicyPollster() - fake_fw_policy = self.fake_get_fw_policy() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'fw_policy_get_all', - return_value=fake_fw_policy)) - - @staticmethod - def fake_get_fw_policy(): - return [{'name': 'my_fw_policy', - 'description': 'fw_policy', - 'admin_state_up': True, - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'firewall_rules': [{'enabled': True, - 'action': 'allow', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '80', - 'source_ip_address': '10.24.4.2'}, - {'enabled': True, - 'action': 'deny', - 'ip_version': 4, - 'protocol': 'tcp', - 'destination_port': '22'}], - 'shared': True, - 'audited': True, - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_policy_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fw_policy()[0][field], - samples[0].resource_metadata[field]) - - def test_get_policy_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fw_policy())) - self.assertEqual(set(['network.services.firewall.policy']), - set([s.name for s in samples])) - - def test_fw_policy_discovery(self): - discovered_policy = discovery.FirewallPolicyDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_policy)) - self.assertEqual(self.fake_get_fw_policy(), discovered_policy) diff --git a/ceilometer/tests/unit/network/services/test_lbaas.py b/ceilometer/tests/unit/network/services/test_lbaas.py deleted file mode 100644 index c705d611..00000000 --- a/ceilometer/tests/unit/network/services/test_lbaas.py +++ /dev/null @@ -1,506 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import cfg -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import lbaas - - -class _BaseTestLBPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestLBPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - cfg.CONF.set_override('neutron_lbaas_version', - 'v1', - group='service_types') - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestLBPoolPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBPoolPollster, self).setUp() - self.pollster = lbaas.LBPoolPollster() - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'INACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb02', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'PENDING_CREATE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'UNKNOWN', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb03', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - {'status': 'error', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb_error', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - def test_pool_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_pools()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_pools())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_pool_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - 
resources=self.fake_get_pools())) - self.assertEqual(set(['network.services.lb.pool']), - set([s.name for s in samples])) - - def test_pool_discovery(self): - discovered_pools = discovery.LBPoolsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_pools)) - for pool in self.fake_get_pools(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_pools) - else: - self.assertIn(pool, discovered_pools) - - -class TestLBVipPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBVipPollster, self).setUp() - self.pollster = lbaas.LBVipPollster() - fake_vips = self.fake_get_vips() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'vip_get_all', - return_value=fake_vips)) - - @staticmethod - def fake_get_vips(): - return [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - {'status': 'INACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.3', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'ba6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip02'}, - {'status': 'PENDING_CREATE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.4', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'UNKNOWN', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip03'}, - {'status': 'error', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.8', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'fg6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip_error'}, - ] - - def test_vip_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: 
- self.assertEqual(self.fake_get_vips()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vip_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vips())) - self.assertEqual(set(['network.services.lb.vip']), - set([s.name for s in samples])) - - def test_vip_discovery(self): - discovered_vips = discovery.LBVipsDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_vips)) - for pool in self.fake_get_vips(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_vips) - else: - self.assertIn(pool, discovered_vips) - - -class TestLBMemberPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBMemberPollster, self).setUp() - self.pollster = lbaas.LBMemberPollster() - fake_members = self.fake_get_members() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'member_get_all', - return_value=fake_members)) - - @staticmethod - def fake_get_members(): - return [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'INACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.5', - 'status_description': None, - 'id': '2456661eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'PENDING_CREATE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'UNKNOWN', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - {'status': 'error', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.6', - 'status_description': None, - 'id': '45630b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_members()[0][field], - samples[0].resource_metadata[field]) - - def test_pool_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_members())) - self.assertEqual(set(['network.services.lb.member']), - set([s.name for s in samples])) - - def test_members_discovery(self): - 
discovered_members = discovery.LBMembersDiscovery().discover( - self.manager) - self.assertEqual(4, len(discovered_members)) - for pool in self.fake_get_members(): - if pool['status'] == 'error': - self.assertNotIn(pool, discovered_members) - else: - self.assertIn(pool, discovered_members) - - -class TestLBHealthProbePollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBHealthProbePollster, self).setUp() - self.pollster = lbaas.LBHealthMonitorPollster() - fake_health_monitor = self.fake_get_health_monitor() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'health_monitor_get_all', - return_value=fake_health_monitor)) - - @staticmethod - def fake_get_health_monitor(): - return [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }] - - def test_get_samples_not_empty(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_health_monitor()[0][field], - samples[0].resource_metadata[field]) - - def test_get_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - self.fake_get_health_monitor())) - self.assertEqual(set(['network.services.lb.health_monitor']), - set([s.name for s in samples])) - - def test_probes_discovery(self): - discovered_probes = discovery.LBHealthMonitorsDiscovery().discover( - self.manager) - self.assertEqual(discovered_probes, self.fake_get_health_monitor()) - - -class TestLBStatsPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBStatsPollster, self).setUp() - fake_pool_stats = self.fake_pool_stats() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'pool_stats', - return_value=fake_pool_stats)) - - fake_pools = self.fake_get_pools() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'pool_get_all', - return_value=fake_pools)) - - @staticmethod - def fake_get_pools(): - return [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'provider': 'haproxy', - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ] - - @staticmethod - def fake_pool_stats(): - return {'stats': {'active_connections': 2, - 'bytes_in': 1, - 'bytes_out': 3, - 'total_connections': 4 - } - } - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, sample_name, expected_volume, - expected_type): - pollster = factory() - cache = {} - samples = list(pollster.get_samples(self.manager, cache, - self.fake_get_pools())) - self.assertEqual(1, len(samples)) - self.assertIsNotNone(samples) - self.assertIn('lbstats', cache) - self.assertEqual(set([sample_name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == sample_name] - self.assertEqual(1, len(match), 'missing counter %s' % sample_name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(expected_type, match[0].type) - - def test_lb_total_connections(self): - self._check_get_samples(lbaas.LBTotalConnectionsPollster, - 'network.services.lb.total.connections', - 4, 'cumulative') - - def test_lb_active_connections(self): - self._check_get_samples(lbaas.LBActiveConnectionsPollster, - 'network.services.lb.active.connections', - 2, 'gauge') - - def test_lb_incoming_bytes(self): - self._check_get_samples(lbaas.LBBytesInPollster, - 'network.services.lb.incoming.bytes', - 1, 'gauge') - - def test_lb_outgoing_bytes(self): - self._check_get_samples(lbaas.LBBytesOutPollster, - 'network.services.lb.outgoing.bytes', - 3, 'gauge') diff --git a/ceilometer/tests/unit/network/services/test_lbaas_v2.py b/ceilometer/tests/unit/network/services/test_lbaas_v2.py deleted file mode 100644 index 42fc73ca..00000000 --- a/ceilometer/tests/unit/network/services/test_lbaas_v2.py +++ /dev/null @@ -1,303 +0,0 @@ -# -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslo_config import cfg -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import lbaas - - -class _BaseTestLBPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestLBPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestLBListenerPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBListenerPollster, self).setUp() - self.pollster = lbaas.LBListenerPollster() - self.pollster.lb_version = 'v2' - fake_listeners = self.fake_list_listeners() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'list_listener', - return_value=fake_listeners)) - - @staticmethod - def fake_list_listeners(): - return [{'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'a9729389-6147-41a3-ab22-a24aed8692b2'}], - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': 'mylistener_online', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'ONLINE'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a'}], - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylistener_offline', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'OFFLINE'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'mylistener_error', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'ERROR'}, - {'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'loadbalancers': [ - {'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'mylistener_pending_create', - 'admin_state_up': True, - 'connection_limit': 100, - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'protocol_port': 80, - 'operating_status': 'PENDING_CREATE'} - ] - - def test_listener_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_list_listeners()[0][field], - samples[0].resource_metadata[field]) - - def test_listener_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(4, samples[2].volume) - - def test_list_listener_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_listeners())) - self.assertEqual(set(['network.services.lb.listener']), - set([s.name for s in samples])) - - def test_listener_discovery(self): - discovered_listeners = discovery.LBListenersDiscovery().discover( - self.manager) - self.assertEqual(4, len(discovered_listeners)) - for listener in self.fake_list_listeners(): - if listener['operating_status'] == 'pending_create': - self.assertNotIn(listener, discovered_listeners) - else: - self.assertIn(listener, discovered_listeners) - - -class TestLBLoadBalancerPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBLoadBalancerPollster, self).setUp() - self.pollster = lbaas.LBLoadBalancerPollster() - self.pollster.lb_version = 'v2' - fake_loadbalancers = self.fake_list_loadbalancers() - 
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'list_loadbalancer', - return_value=fake_loadbalancers)) - - @staticmethod - def fake_list_loadbalancers(): - return [{'operating_status': 'ONLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'ACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'vip_address': '10.0.0.2', - 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_online'}, - {'operating_status': 'OFFLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a'}], - 'vip_address': '10.0.0.3', - 'vip_subnet_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'name': 'loadbalancer_offline'}, - {'operating_status': 'ERROR', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d8b'}], - 'vip_address': '10.0.0.4', - 'vip_subnet_id': '213d3059-87a4-45a5-91e9-d721068df0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_error'}, - {'operating_status': 'PENDING_CREATE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'INACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d4ed7c'}], - 'vip_address': '10.0.0.5', - 'vip_subnet_id': '123d3059-87a4-45a5-91e9-d721068ae0c3', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395763b2', - 'name': 'loadbalancer_pending_create'} - ] - - def test_loadbalancer_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_list_loadbalancers()[0][field], - samples[0].resource_metadata[field]) - - def test_loadbalancer_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(4, samples[2].volume) - - def test_list_loadbalancer_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_list_loadbalancers())) - self.assertEqual(set(['network.services.lb.loadbalancer']), - set([s.name for s in samples])) - - def test_loadbalancer_discovery(self): - discovered_loadbalancers = \ - discovery.LBLoadBalancersDiscovery().discover(self.manager) - self.assertEqual(4, len(discovered_loadbalancers)) - for loadbalancer in self.fake_list_loadbalancers(): - if loadbalancer['operating_status'] == 'pending_create': - self.assertNotIn(loadbalancer, discovered_loadbalancers) - else: - self.assertIn(loadbalancer, discovered_loadbalancers) - - -class TestLBStatsPollster(_BaseTestLBPollster): - - def setUp(self): - super(TestLBStatsPollster, self).setUp() - fake_balancer_stats = self.fake_balancer_stats() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'get_loadbalancer_stats', - return_value=fake_balancer_stats)) - - fake_loadbalancers = self.fake_list_loadbalancers() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'list_loadbalancer', - return_value=fake_loadbalancers)) - cfg.CONF.set_override('neutron_lbaas_version', - 'v2', - group='service_types') - - @staticmethod - def fake_list_loadbalancers(): - return [{'operating_status': 'ONLINE', - 'description': '', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'provisioning_status': 'ACTIVE', - 'listeners': [{'id': 'fe7rad36-437d-4c84-aee1-186027d3bdcd'}], - 'vip_address': '10.0.0.2', - 'vip_subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'loadbalancer_online'}, - ] - - @staticmethod - def fake_balancer_stats(): - return {'active_connections': 2, - 'bytes_in': 1, - 'bytes_out': 3, - 'total_connections': 4} - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def _check_get_samples(self, factory, sample_name, expected_volume, - expected_type): - pollster = factory() - - cache = {} - samples = list(pollster.get_samples(self.manager, cache, - self.fake_list_loadbalancers())) - self.assertEqual(1, len(samples)) - self.assertIsNotNone(samples) - self.assertIn('lbstats', cache) - self.assertEqual(set([sample_name]), set([s.name for s in samples])) - - match = [s for s in samples if s.name == sample_name] - self.assertEqual(1, len(match), 'missing counter %s' % sample_name) - self.assertEqual(expected_volume, match[0].volume) - self.assertEqual(expected_type, match[0].type) - - def test_lb_total_connections(self): - self._check_get_samples(lbaas.LBTotalConnectionsPollster, - 'network.services.lb.total.connections', - 4, 'cumulative') - - def test_lb_active_connections(self): - self._check_get_samples(lbaas.LBActiveConnectionsPollster, - 'network.services.lb.active.connections', - 2, 'gauge') - - def test_lb_incoming_bytes(self): - self._check_get_samples(lbaas.LBBytesInPollster, - 'network.services.lb.incoming.bytes', - 1, 'gauge') - - def test_lb_outgoing_bytes(self): - self._check_get_samples(lbaas.LBBytesOutPollster, - 'network.services.lb.outgoing.bytes', - 3, 'gauge') diff --git a/ceilometer/tests/unit/network/services/test_vpnaas.py b/ceilometer/tests/unit/network/services/test_vpnaas.py deleted file mode 100644 index 399ff225..00000000 --- a/ceilometer/tests/unit/network/services/test_vpnaas.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright 2014 Cisco Systems,Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network.services import discovery -from ceilometer.network.services import vpnaas - - -class _BaseTestVPNPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestVPNPollster, self).setUp() - self.addCleanup(mock.patch.stopall) - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - catalog = (plugin_base._get_keystone.session.auth.get_access. 
- return_value.service_catalog) - catalog.get_endpoints = mock.MagicMock( - return_value={'network': mock.ANY}) - - -class TestVPNServicesPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestVPNServicesPollster, self).setUp() - self.pollster = vpnaas.VPNServicesPollster() - fake_vpn = self.fake_get_vpn_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' - 'vpn_get_all', - return_value=fake_vpn)) - - @staticmethod - def fake_get_vpn_service(): - return [{'status': 'ACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'INACTIVE', - 'name': 'myvpn', - 'description': '', - 'admin_state_up': True, - 'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'PENDING_CREATE', - 'name': 'myvpn', - 'description': '', - 'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - {'status': 'error', - 'name': 'myvpn', - 'description': '', - 'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a', - 'admin_state_up': False, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'}, - ] - - def test_vpn_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(4, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_vpn_service()[0][field], - samples[0].resource_metadata[field]) - - def test_vpn_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(1, samples[0].volume) - self.assertEqual(0, samples[1].volume) - self.assertEqual(2, samples[2].volume) - - def test_get_vpn_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_vpn_service())) - self.assertEqual(set(['network.services.vpn']), - set([s.name for s in samples])) - - def test_vpn_discovery(self): - discovered_vpns = discovery.VPNServicesDiscovery().discover( - self.manager) - self.assertEqual(3, len(discovered_vpns)) - - for vpn in self.fake_get_vpn_service(): - if vpn['status'] == 'error': - self.assertNotIn(vpn, discovered_vpns) - else: - self.assertIn(vpn, discovered_vpns) - - -class TestIPSecConnectionsPollster(_BaseTestVPNPollster): - - def setUp(self): - super(TestIPSecConnectionsPollster, self).setUp() - self.pollster = vpnaas.IPSecConnectionsPollster() - fake_conns = self.fake_get_ipsec_connections() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'ipsec_site_connections_get_all', - return_value=fake_conns)) - - @staticmethod - def fake_get_ipsec_connections(): - return [{'name': 'connection1', - 'description': 'Remote-connection1', - 'peer_address': '192.168.1.10', - 'peer_id': '192.168.1.10', - 'peer_cidrs': ['192.168.2.0/24', - '192.168.3.0/24'], - 'mtu': 1500, - 'psk': 'abcd', - 'initiator': 'bi-directional', - 'dpd': { - 'action': 'hold', - 'interval': 30, - 'timeout': 120}, - 'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a', - 'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a', - 'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a', - 'admin_state_up': True, - 'status': 'ACTIVE', - 'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a', - 'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'} - ] - - def test_conns_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(1, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_ipsec_connections()[0][field], - samples[0].resource_metadata[field]) - - def test_get_conns_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_ipsec_connections())) - self.assertEqual(set(['network.services.vpn.connections']), - set([s.name for s in samples])) - - def test_conns_discovery(self): - discovered_conns = discovery.IPSecConnectionsDiscovery().discover( - self.manager) - self.assertEqual(1, len(discovered_conns)) - self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns) diff --git a/ceilometer/tests/unit/network/statistics/__init__.py b/ceilometer/tests/unit/network/statistics/__init__.py deleted file mode 100644 index 8602c6a8..00000000 --- a/ceilometer/tests/unit/network/statistics/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslotest import base - - -class _PollsterTestBase(base.BaseTestCase): - - def _test_pollster(self, pollster_class, meter_name, - meter_type, meter_unit): - - pollster = pollster_class() - - self.assertEqual(pollster.meter_name, meter_name) - self.assertEqual(pollster.meter_type, meter_type) - self.assertEqual(pollster.meter_unit, meter_unit) diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py b/ceilometer/tests/unit/network/statistics/opencontrail/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py b/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py deleted file mode 100644 index fdee69c9..00000000 --- a/ceilometer/tests/unit/network/statistics/opencontrail/test_client.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_config import fixture as config_fixture -from oslotest import base - -from ceilometer.network.statistics.opencontrail import client -from ceilometer import service as ceilometer_service - - -class TestOpencontrailClient(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailClient, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.client = client.Client('http://127.0.0.1:8081', {'arg1': 'aaa'}) - - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - self.get_resp.raw.version = 1.1 - self.get_resp.status_code = 200 - self.get_resp.reason = 'OK' - self.get_resp.content = '' - - def test_vm_statistics(self): - self.client.networks.get_vm_statistics('bbb') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa'} - self.assertEqual(expected_data, data) - - def test_vm_statistics_params(self): - self.client.networks.get_vm_statistics('bbb', - {'resource': 'fip_stats_list', - 'virtual_network': 'ccc'}) - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - expected_url = ('http://127.0.0.1:8081/analytics/' - 'uves/virtual-machine/bbb') - self.assertEqual(expected_url, call_args[0]) - - data = call_kwargs.get('data') - - expected_data = {'arg1': 'aaa', - 'resource': 'fip_stats_list', - 'virtual_network': 'ccc'} - self.assertEqual(expected_data, data) diff --git a/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py b/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py deleted file mode 100644 index c308f446..00000000 --- a/ceilometer/tests/unit/network/statistics/opencontrail/test_driver.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslotest import base -from six.moves.urllib import parse as urlparse - -from ceilometer.network.statistics.opencontrail import driver - - -class TestOpencontrailDriver(base.BaseTestCase): - - def setUp(self): - super(TestOpencontrailDriver, self).setUp() - - self.nc_ports = mock.patch('ceilometer.neutron_client' - '.Client.port_get_all', - return_value=self.fake_ports()) - self.nc_ports.start() - - self.driver = driver.OpencontrailDriver() - self.parse_url = urlparse.ParseResult('opencontrail', - '127.0.0.1:8143', - '/', None, None, None) - self.params = {'password': ['admin'], - 'scheme': ['http'], - 'username': ['admin'], - 'verify_ssl': ['false'], - 'resource': ['if_stats_list']} - - @staticmethod - def fake_ports(): - return [{'admin_state_up': True, - 'device_owner': 'compute:None', - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}] - - @staticmethod - def fake_port_stats(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], - "fip_stats_list": [{ - "in_bytes": 33, - "iface_name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442"), - "out_bytes": 44, - "out_pkts": 10, - "virtual_network": "default-domain:openstack:public", - "in_pkts": 11, - "ip_address": "1.1.1.1" - }] - }}}]} - - @staticmethod - def fake_port_stats_with_node(): - return {"value": [{ - "name": "c588ebb7-ae52-485a-9f0c-b2791c5da196", - "value": { - "UveVirtualMachineAgent": { - "if_stats_list": [ - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 5, - "in_pkts": 6, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442") - }], 'node1'], - [[{ - "out_bytes": 22, - "in_bandwidth_usage": 0, - "in_bytes": 23, - "out_bandwidth_usage": 0, - "out_pkts": 4, - "in_pkts": 13, - "name": ("default-domain:demo:" - "96d49cc3-4e01-40ce-9cac-c0e32642a442")}], - 'node2'] - ] - }}}]} - - def _test_meter(self, meter_name, expected, fake_port_stats=None): - if not fake_port_stats: - fake_port_stats = self.fake_port_stats() - with mock.patch('ceilometer.network.' - 'statistics.opencontrail.' - 'client.NetworksAPIClient.' 
- 'get_vm_statistics', - return_value=fake_port_stats) as port_stats: - - samples = self.driver.get_sample_data(meter_name, self.parse_url, - self.params, {}) - - self.assertEqual(expected, [s for s in samples]) - - port_stats.assert_called_with('*') - - def test_switch_port_receive_packets_with_node(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},), - (13, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected, - self.fake_port_stats_with_node()) - - def test_switch_port_receive_packets(self): - expected = [(6, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets(self): - expected = [(5, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes(self): - expected = [(23, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes(self): - expected = [(22, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'if_stats_list'},)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_receive_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(11, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.receive.packets', expected) - - def test_switch_port_transmit_packets_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(10, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': 
'89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.transmit.packets', expected) - - def test_switch_port_receive_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(33, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.receive.bytes', expected) - - def test_switch_port_transmit_bytes_fip(self): - self.params['resource'] = ['fip_stats_list'] - expected = [(44, - '96d49cc3-4e01-40ce-9cac-c0e32642a442', - {'device_owner_id': - '674e553b-8df9-4321-87d9-93ba05b93558', - 'domain': 'default-domain', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'project': 'demo', - 'project_id': '89271fa581ab4380bf172f868c3615f9', - 'resource': 'fip_stats_list'},)] - self._test_meter('switch.port.transmit.bytes', expected) - - def test_switch_port_transmit_bytes_non_existing_network(self): - self.params['virtual_network'] = ['aaa'] - self.params['resource'] = ['fip_stats_list'] - self._test_meter('switch.port.transmit.bytes', []) diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py b/ceilometer/tests/unit/network/statistics/opendaylight/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py b/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py deleted file mode 100644 index 7b2250ee..00000000 --- a/ceilometer/tests/unit/network/statistics/opendaylight/test_client.py +++ /dev/null @@ -1,176 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -from oslo_config import fixture as config_fixture -from oslotest import base -from requests import auth as req_auth -import six -from six.moves.urllib import parse as urlparse - -from ceilometer.i18n import _ -from ceilometer.network.statistics.opendaylight import client -from ceilometer import service as ceilometer_service - - -class TestClientHTTPBasicAuth(base.BaseTestCase): - - auth_way = 'basic' - scheme = 'http' - - def setUp(self): - super(TestClientHTTPBasicAuth, self).setUp() - self.conf = self.useFixture(config_fixture.Config()) - ceilometer_service.prepare_service(argv=[], config_files=[]) - self.parsed_url = urlparse.urlparse( - 'http://127.0.0.1:8080/controller/nb/v2?container_name=default&' - 'container_name=egg&auth=%s&user=admin&password=admin_pass&' - 'scheme=%s' % (self.auth_way, self.scheme)) - self.params = urlparse.parse_qs(self.parsed_url.query) - self.endpoint = urlparse.urlunparse( - urlparse.ParseResult(self.scheme, - self.parsed_url.netloc, - self.parsed_url.path, - None, None, None)) - odl_params = {'auth': self.params.get('auth')[0], - 'user': self.params.get('user')[0], - 'password': self.params.get('password')[0]} - self.client = client.Client(self.endpoint, odl_params) - - self.resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.resp).start() - - self.resp.raw.version = 1.1 - self.resp.status_code = 200 - self.resp.reason = 'OK' - self.resp.headers = {} - self.resp.content = 'dummy' - - def _test_request(self, method, url): - data = method('default') - - call_args = self.get.call_args_list[0][0] - call_kwargs = self.get.call_args_list[0][1] - - # check url - real_url = url % {'container_name': 'default', - 'scheme': self.scheme} - self.assertEqual(real_url, call_args[0]) - - # check auth parameters - auth = call_kwargs.get('auth') - if self.auth_way == 'digest': - self.assertIsInstance(auth, req_auth.HTTPDigestAuth) - else: - self.assertIsInstance(auth, req_auth.HTTPBasicAuth) - self.assertEqual('admin', auth.username) - self.assertEqual('admin_pass', auth.password) - - # check header - self.assertEqual( - {'Accept': 'application/json'}, - call_kwargs['headers']) - - # check return value - self.assertEqual(self.get().json(), data) - - def test_flow_statistics(self): - self._test_request( - self.client.statistics.get_flow_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/flow') - - def test_port_statistics(self): - self._test_request( - self.client.statistics.get_port_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/port') - - def test_table_statistics(self): - self._test_request( - self.client.statistics.get_table_statistics, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/statistics/%(container_name)s/table') - - def test_topology(self): - self._test_request( - self.client.topology.get_topology, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s') - - def test_user_links(self): - self._test_request( - self.client.topology.get_user_links, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/topology/%(container_name)s/userLinks') - - def test_switch(self): - self._test_request( - self.client.switch_manager.get_nodes, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/switchmanager/%(container_name)s/nodes') - - def test_active_hosts(self): - self._test_request( - self.client.host_tracker.get_active_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - 
'/hosttracker/%(container_name)s/hosts/active') - - def test_inactive_hosts(self): - self._test_request( - self.client.host_tracker.get_inactive_hosts, - '%(scheme)s://127.0.0.1:8080/controller/nb/v2' - '/hosttracker/%(container_name)s/hosts/inactive') - - def test_http_error(self): - self.resp.status_code = 404 - self.resp.reason = 'Not Found' - - try: - self.client.statistics.get_flow_statistics('default') - self.fail('') - except client.OpenDaylightRESTAPIFailed as e: - self.assertEqual( - _('OpenDaylitght API returned %(status)s %(reason)s') % - {'status': self.resp.status_code, - 'reason': self.resp.reason}, - six.text_type(e)) - - def test_other_error(self): - - class _Exception(Exception): - pass - - self.get = mock.patch('requests.get', - side_effect=_Exception).start() - - self.assertRaises(_Exception, - self.client.statistics.get_flow_statistics, - 'default') - - -class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth): - - auth_way = 'digest' - - -class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth): - - scheme = 'https' - - -class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth): - - scheme = 'https' diff --git a/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py b/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py deleted file mode 100644 index 891b3a1d..00000000 --- a/ceilometer/tests/unit/network/statistics/opendaylight/test_driver.py +++ /dev/null @@ -1,1705 +0,0 @@ -# -# Copyright 2013 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import abc - -import mock -from oslotest import base -import six -from six import moves -from six.moves.urllib import parse as url_parse - -from ceilometer.network.statistics.opendaylight import driver - - -@six.add_metaclass(abc.ABCMeta) -class _Base(base.BaseTestCase): - - @abc.abstractproperty - def flow_data(self): - pass - - @abc.abstractproperty - def port_data(self): - pass - - @abc.abstractproperty - def table_data(self): - pass - - @abc.abstractproperty - def topology_data(self): - pass - - @abc.abstractproperty - def switch_data(self): - pass - - @abc.abstractproperty - def user_links_data(self): - pass - - @abc.abstractproperty - def active_hosts_data(self): - pass - - @abc.abstractproperty - def inactive_hosts_data(self): - pass - - fake_odl_url = url_parse.ParseResult('opendaylight', - 'localhost:8080', - 'controller/nb/v2', - None, - None, - None) - - fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=default&auth=basic') - - fake_params_multi_container = ( - url_parse.parse_qs('user=admin&password=admin&scheme=http&' - 'container_name=first&container_name=second&' - 'auth=basic')) - - def setUp(self): - super(_Base, self).setUp() - self.addCleanup(mock.patch.stopall) - - self.driver = driver.OpenDayLightDriver() - - self.get_flow_statistics = mock.patch( - 'ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_flow_statistics', - return_value=self.flow_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_table_statistics', - return_value=self.table_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_port_statistics', - return_value=self.port_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_topology', - return_value=self.topology_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'TopologyAPIClient.get_user_links', - return_value=self.user_links_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'SwitchManagerAPIClient.get_nodes', - return_value=self.switch_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_active_hosts', - return_value=self.active_hosts_data).start() - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'HostTrackerAPIClient.get_inactive_hosts', - return_value=self.inactive_hosts_data).start() - - def _test_for_meter(self, meter_name, expected_data): - sample_data = self.driver.get_sample_data(meter_name, - self.fake_odl_url, - self.fake_params, - {}) - - for sample, expected in moves.zip(sample_data, expected_data): - self.assertEqual(expected[0], sample[0]) # check volume - self.assertEqual(expected[1], sample[1]) # check resource id - self.assertEqual(expected[2], sample[2]) # check resource metadata - - -class TestOpenDayLightDriverSpecial(_Base): - - flow_data = {"flowStatistics": []} - port_data = {"portStatistics": []} - table_data = {"tableStatistics": []} - topology_data = {"edgeProperties": []} - switch_data = {"nodeProperties": []} - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_not_implemented_meter(self): - sample_data = self.driver.get_sample_data('egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - sample_data = self.driver.get_sample_data('switch.table.egg', - self.fake_odl_url, - self.fake_params, - {}) - self.assertIsNone(sample_data) - - def test_cache(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(1, self.get_flow_statistics.call_count) - - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - def test_multi_container(self): - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - self.assertEqual(2, self.get_flow_statistics.call_count) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('first', odl_data) - self.assertIn('second', odl_data) - - def test_http_error(self): - - mock.patch('ceilometer.network.statistics.opendaylight.client.' - 'StatisticsAPIClient.get_flow_statistics', - side_effect=Exception()).start() - - sample_data = self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params, - {}) - - self.assertEqual(0, len(sample_data)) - - mock.patch('ceilometer.network.statistics.opendaylight.client.' 
- 'StatisticsAPIClient.get_flow_statistics', - side_effect=[Exception(), self.flow_data]).start() - cache = {} - self.driver.get_sample_data('switch', - self.fake_odl_url, - self.fake_params_multi_container, - cache) - - self.assertIn('network.statistics.opendaylight', cache) - - odl_data = cache['network.statistics.opendaylight'] - - self.assertIn('second', odl_data) - - -class TestOpenDayLightDriverSimple(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - ] - } - ] - } - topology_data = {"edgeProperties": []} - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - ] - } - user_links_data = {"userLinks": []} - active_hosts_data = {"hostConfig": []} - inactive_hosts_data = {"hostConfig": []} - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_timeStamp_connectedSince": "1377291227877" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ 
- (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def 
test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": 
"1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) - - -class TestOpenDayLightDriverComplex(_Base): - - flow_data = { - "flowStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "flowStatistic": [ - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.1" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "0", - "durationNanoseconds": "397000000", - "durationSeconds": "1828", - "packetCount": "0", - "tableId": "0" - }, - { - "flow": { - "match": { - "matchField": [ - { - "type": "DL_TYPE", - "value": "2048" - }, - { - "mask": "255.255.255.255", - "type": "NW_DST", - "value": "1.1.1.2" - } - ] - }, - "actions": { - "@type": "output", - "port": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - } - }, - "hardTimeout": "0", - "id": "0", - "idleTimeout": "0", - "priority": "1" - }, - "byteCount": "89", - "durationNanoseconds": "200000", - "durationSeconds": "5648", - "packetCount": "30", - "tableId": "1" - } - ] - } - ] - } - port_data = { - "portStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "portStatistic": [ - { - "nodeConnector": { - "id": "4", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "3", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12740", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "182", - "transmitBytes": "12110", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "173" - }, - { - "nodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "12180", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "174", - "transmitBytes": "12670", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "181" - }, - { - "nodeConnector": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - }, - { - "nodeConnector": { - "id": "0", 
- "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "collisionCount": "0", - "receiveBytes": "0", - "receiveCrcError": "0", - "receiveDrops": "0", - "receiveErrors": "0", - "receiveFrameError": "0", - "receiveOverRunError": "0", - "receivePackets": "0", - "transmitBytes": "0", - "transmitDrops": "0", - "transmitErrors": "0", - "transmitPackets": "0" - } - ] - } - ] - } - table_data = { - "tableStatistics": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "tableStatistic": [ - { - "activeCount": "11", - "lookupCount": "816", - "matchedCount": "220", - "nodeTable": { - "id": "0", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - }, - { - "activeCount": "20", - "lookupCount": "10", - "matchedCount": "5", - "nodeTable": { - "id": "1", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - } - } - } - ] - } - ] - } - topology_data = { - "edgeProperties": [ - { - "edge": { - "headNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "bandwidth": { - "value": 10000000000 - }, - "config": { - "value": 1 - }, - "name": { - "value": "s2-eth3" - }, - "state": { - "value": 1 - }, - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - }, - { - "edge": { - "headNodeConnector": { - "id": "5", - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "type": "OF" - }, - "tailNodeConnector": { - "id": "2", - "node": { - "id": "00:00:00:00:00:00:00:04", - "type": "OF" - }, - "type": "OF" - } - }, - "properties": { - "timeStamp": { - "name": "creation", - "value": 1379527162648 - } - } - } - ] - } - switch_data = { - "nodeProperties": [ - { - "node": { - "id": "00:00:00:00:00:00:00:02", - "type": "OF" - }, - "properties": { - "actions": { - "value": "4095" - }, - "buffers": { - "value": "256" - }, - "capabilities": { - "value": "199" - }, - "description": { - "value": "None" - }, - "macAddress": { - "value": "00:00:00:00:00:02" - }, - "tables": { - "value": "-1" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291227877" - } - } - }, - { - "node": { - "id": "00:00:00:00:00:00:00:03", - "type": "OF" - }, - "properties": { - "actions": { - "value": "1024" - }, - "buffers": { - "value": "512" - }, - "capabilities": { - "value": "1000" - }, - "description": { - "value": "Foo Bar" - }, - "macAddress": { - "value": "00:00:00:00:00:03" - }, - "tables": { - "value": "10" - }, - "timeStamp": { - "name": "connectedSince", - "value": "1377291228000" - } - } - } - ] - } - user_links_data = { - "userLinks": [ - { - "dstNodeConnector": "OF|5@OF|00:00:00:00:00:00:00:05", - "name": "link1", - "srcNodeConnector": "OF|3@OF|00:00:00:00:00:00:00:02", - "status": "Success" - } - ] - } - active_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": "00:00:00:00:01:01", - "networkAddress": "1.1.1.1", - "nodeConnectorId": "9", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:00:02:02", - "networkAddress": "2.2.2.2", - "nodeConnectorId": "1", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "true", - "vlan": "0" - } - ] - } - inactive_hosts_data = { - "hostConfig": [ - { - "dataLayerAddress": 
"00:00:00:01:01:01", - "networkAddress": "1.1.1.3", - "nodeConnectorId": "8", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:01", - "nodeType": "OF", - "staticHost": "false", - "vlan": "0" - }, - { - "dataLayerAddress": "00:00:00:01:02:02", - "networkAddress": "2.2.2.4", - "nodeConnectorId": "0", - "nodeConnectorType": "OF", - "nodeId": "00:00:00:00:00:00:00:02", - "nodeType": "OF", - "staticHost": "false", - "vlan": "1" - } - ] - } - - def test_meter_switch(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "4095", - "properties_buffers": "256", - "properties_capabilities": "199", - "properties_description": "None", - "properties_macAddress": "00:00:00:00:00:02", - "properties_tables": "-1", - "properties_timeStamp_connectedSince": "1377291227877" - }), - (1, "00:00:00:00:00:00:00:03", { - 'controller': 'OpenDaylight', - 'container': 'default', - "properties_actions": "1024", - "properties_buffers": "512", - "properties_capabilities": "1000", - "properties_description": "Foo Bar", - "properties_macAddress": "00:00:00:00:00:03", - "properties_tables": "10", - "properties_timeStamp_connectedSince": "1377291228000" - }), - ] - - self._test_for_meter('switch', expected_data) - - def test_meter_switch_port(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3', - 'user_link_node_id': '00:00:00:00:00:00:00:05', - 'user_link_node_port': '5', - 'user_link_status': 'Success', - 'user_link_name': 'link1', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2', - 'topology_node_id': '00:00:00:00:00:00:00:03', - 'topology_node_port': '2', - "topology_bandwidth": 10000000000, - "topology_config": 1, - "topology_name": "s2-eth3", - "topology_state": 1, - "topology_timeStamp_creation": 1379527162648 - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1', - 'host_status': 'active', - 'host_dataLayerAddress': '00:00:00:00:02:02', - 'host_networkAddress': '2.2.2.2', - 'host_staticHost': 'true', - 'host_vlan': '0', - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0', - 'host_status': 'inactive', - 'host_dataLayerAddress': '00:00:00:01:02:02', - 'host_networkAddress': '2.2.2.4', - 'host_staticHost': 'false', - 'host_vlan': '1', - }), - ] - self._test_for_meter('switch.port', expected_data) - - def test_meter_switch_port_receive_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (182, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (174, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.packets', expected_data) - - def test_meter_switch_port_transmit_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 
'default', - 'port': '4'}), - (173, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (181, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.packets', expected_data) - - def test_meter_switch_port_receive_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12740, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12180, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.bytes', expected_data) - - def test_meter_switch_port_transmit_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (12110, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (12670, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.bytes', expected_data) - - def test_meter_switch_port_receive_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.drops', expected_data) - - def test_meter_switch_port_transmit_drops(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.drops', expected_data) - - def test_meter_switch_port_receive_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", 
{ - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.errors', expected_data) - - def test_meter_switch_port_transmit_errors(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.transmit.errors', expected_data) - - def test_meter_switch_port_receive_frame_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.frame_error', expected_data) - - def test_meter_switch_port_receive_overrun_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.overrun_error', - expected_data) - - def test_meter_switch_port_receive_crc_error(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.receive.crc_error', expected_data) - - def test_meter_switch_port_collision_count(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '4'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 
'container': 'default', - 'port': '3'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '2'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '1'}), - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'port': '0'}), - ] - self._test_for_meter('switch.port.collision.count', expected_data) - - def test_meter_switch_table(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table', expected_data) - - def test_meter_switch_table_active_entries(self): - expected_data = [ - (11, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (20, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.active.entries', expected_data) - - def test_meter_switch_table_lookup_packets(self): - expected_data = [ - (816, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (10, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.lookup.packets', expected_data) - - def test_meter_switch_table_matched_packets(self): - expected_data = [ - (220, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0'}), - (5, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1'}), - ] - self._test_for_meter('switch.table.matched.packets', expected_data) - - def test_meter_switch_flow(self): - expected_data = [ - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - (1, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1" - }), - ] - self._test_for_meter('switch.flow', expected_data) - - def test_meter_switch_flow_duration_seconds(self): - expected_data = [ - (1828, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (5648, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_seconds', expected_data) - - def test_meter_switch_flow_duration_nanoseconds(self): - expected_data = [ - (397000000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (200000, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.duration_nanoseconds', expected_data) - - def test_meter_switch_flow_packets(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (30, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - 
"flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.packets', expected_data) - - def test_meter_switch_flow_bytes(self): - expected_data = [ - (0, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '0', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.1", - "flow_actions_@type": "output", - "flow_actions_port_id": "3", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:02", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - (89, "00:00:00:00:00:00:00:02", { - 'controller': 'OpenDaylight', - 'container': 'default', - 'table_id': '1', - 'flow_id': '0', - "flow_match_matchField[0]_type": "DL_TYPE", - "flow_match_matchField[0]_value": "2048", - "flow_match_matchField[1]_mask": "255.255.255.255", - "flow_match_matchField[1]_type": "NW_DST", - "flow_match_matchField[1]_value": "1.1.1.2", - "flow_actions_@type": "output", - "flow_actions_port_id": "4", - "flow_actions_port_node_id": "00:00:00:00:00:00:00:03", - "flow_actions_port_node_type": "OF", - "flow_actions_port_type": "OF", - "flow_hardTimeout": "0", - "flow_idleTimeout": "0", - "flow_priority": "1"}), - ] - self._test_for_meter('switch.flow.bytes', expected_data) diff --git a/ceilometer/tests/unit/network/statistics/test_driver.py b/ceilometer/tests/unit/network/statistics/test_driver.py deleted file mode 100644 index 9964b7f4..00000000 --- a/ceilometer/tests/unit/network/statistics/test_driver.py +++ /dev/null @@ -1,37 +0,0 @@ -# -# Copyright 2014 NEC Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslotest import base
-
-from ceilometer.network.statistics import driver
-
-
-class TestDriver(base.BaseTestCase):
-
-    @staticmethod
-    def test_driver_ok():
-
-        class OkDriver(driver.Driver):
-
-            def get_sample_data(self, meter_name, resources, cache):
-                pass
-
-        OkDriver()
-
-    def test_driver_ng(self):
-
-        class NgDriver(driver.Driver):
-            """get_sample_data method is missing."""
-
-        self.assertRaises(TypeError, NgDriver)
diff --git a/ceilometer/tests/unit/network/statistics/test_flow.py b/ceilometer/tests/unit/network/statistics/test_flow.py
deleted file mode 100644
index e25b559f..00000000
--- a/ceilometer/tests/unit/network/statistics/test_flow.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ceilometer.network.statistics import flow
-from ceilometer import sample
-from ceilometer.tests.unit.network import statistics
-
-
-class TestFlowPollsters(statistics._PollsterTestBase):
-
-    def test_flow_pollster(self):
-        self._test_pollster(
-            flow.FlowPollster,
-            'switch.flow',
-            sample.TYPE_GAUGE,
-            'flow')
-
-    def test_flow_pollster_duration_seconds(self):
-        self._test_pollster(
-            flow.FlowPollsterDurationSeconds,
-            'switch.flow.duration_seconds',
-            sample.TYPE_GAUGE,
-            's')
-
-    def test_flow_pollster_duration_nanoseconds(self):
-        self._test_pollster(
-            flow.FlowPollsterDurationNanoseconds,
-            'switch.flow.duration_nanoseconds',
-            sample.TYPE_GAUGE,
-            'ns')
-
-    def test_flow_pollster_packets(self):
-        self._test_pollster(
-            flow.FlowPollsterPackets,
-            'switch.flow.packets',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_flow_pollster_bytes(self):
-        self._test_pollster(
-            flow.FlowPollsterBytes,
-            'switch.flow.bytes',
-            sample.TYPE_CUMULATIVE,
-            'B')
diff --git a/ceilometer/tests/unit/network/statistics/test_port.py b/ceilometer/tests/unit/network/statistics/test_port.py
deleted file mode 100644
index d05f9127..00000000
--- a/ceilometer/tests/unit/network/statistics/test_port.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
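(Editor's note: TestDriver above pins down the contract of ceilometer.network.statistics.driver.Driver: a subclass instantiates only if it implements get_sample_data. A minimal sketch of such an abstract base, assuming the abc/six metaclass pattern used elsewhere in the tree; the real class may carry more interface:)

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    # Sketch only, not the actual ceilometer class.

    @abc.abstractmethod
    def get_sample_data(self, meter_name, parse_url, params, cache):
        """Yield (volume, resource_id, resource_metadata) tuples."""


class Broken(Driver):
    """No get_sample_data override."""

# Broken() raises TypeError, exactly what test_driver_ng asserts;
# only a subclass implementing get_sample_data can be instantiated.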
-
-from ceilometer.network.statistics import port
-from ceilometer import sample
-from ceilometer.tests.unit.network import statistics
-
-
-class TestPortPollsters(statistics._PollsterTestBase):
-
-    def test_port_pollster(self):
-        self._test_pollster(
-            port.PortPollster,
-            'switch.port',
-            sample.TYPE_GAUGE,
-            'port')
-
-    def test_port_pollster_receive_packets(self):
-        self._test_pollster(
-            port.PortPollsterReceivePackets,
-            'switch.port.receive.packets',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_transmit_packets(self):
-        self._test_pollster(
-            port.PortPollsterTransmitPackets,
-            'switch.port.transmit.packets',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_receive_bytes(self):
-        self._test_pollster(
-            port.PortPollsterReceiveBytes,
-            'switch.port.receive.bytes',
-            sample.TYPE_CUMULATIVE,
-            'B')
-
-    def test_port_pollster_transmit_bytes(self):
-        self._test_pollster(
-            port.PortPollsterTransmitBytes,
-            'switch.port.transmit.bytes',
-            sample.TYPE_CUMULATIVE,
-            'B')
-
-    def test_port_pollster_receive_drops(self):
-        self._test_pollster(
-            port.PortPollsterReceiveDrops,
-            'switch.port.receive.drops',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_transmit_drops(self):
-        self._test_pollster(
-            port.PortPollsterTransmitDrops,
-            'switch.port.transmit.drops',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_receive_errors(self):
-        self._test_pollster(
-            port.PortPollsterReceiveErrors,
-            'switch.port.receive.errors',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_transmit_errors(self):
-        self._test_pollster(
-            port.PortPollsterTransmitErrors,
-            'switch.port.transmit.errors',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_receive_frame_errors(self):
-        self._test_pollster(
-            port.PortPollsterReceiveFrameErrors,
-            'switch.port.receive.frame_error',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_receive_overrun_errors(self):
-        self._test_pollster(
-            port.PortPollsterReceiveOverrunErrors,
-            'switch.port.receive.overrun_error',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_receive_crc_errors(self):
-        self._test_pollster(
-            port.PortPollsterReceiveCRCErrors,
-            'switch.port.receive.crc_error',
-            sample.TYPE_CUMULATIVE,
-            'packet')
-
-    def test_port_pollster_collision_count(self):
-        self._test_pollster(
-            port.PortPollsterCollisionCount,
-            'switch.port.collision.count',
-            sample.TYPE_CUMULATIVE,
-            'packet')
diff --git a/ceilometer/tests/unit/network/statistics/test_statistics.py b/ceilometer/tests/unit/network/statistics/test_statistics.py
deleted file mode 100644
index 5afdd790..00000000
--- a/ceilometer/tests/unit/network/statistics/test_statistics.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
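(Editor's note: each pollster asserted just above is almost pure declaration: a statistics._Base subclass supplying meter_name, meter_type and meter_unit, with sample extraction inherited from the base. A hypothetical pollster in the same style; ExamplePollster and its meter name are illustrative, not part of the tree:)

from ceilometer.network import statistics
from ceilometer import sample


class ExamplePollster(statistics._Base):
    # Hypothetical: declares the three class attributes that
    # test_statistics.py below shows are required by _Base.
    meter_name = 'switch.port.example'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'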
-
-import datetime
-
-from oslo_utils import timeutils
-from oslotest import base
-
-from ceilometer.network import statistics
-from ceilometer.network.statistics import driver
-from ceilometer import sample
-
-
-class TestBase(base.BaseTestCase):
-
-    @staticmethod
-    def test_subclass_ok():
-
-        class OkSubclass(statistics._Base):
-
-            meter_name = 'foo'
-            meter_type = sample.TYPE_GAUGE
-            meter_unit = 'B'
-
-        OkSubclass()
-
-    def test_subclass_ng(self):
-
-        class NgSubclass1(statistics._Base):
-            """meter_name is missing."""
-
-            meter_type = sample.TYPE_GAUGE
-            meter_unit = 'B'
-
-        class NgSubclass2(statistics._Base):
-            """meter_type is missing."""
-
-            meter_name = 'foo'
-            meter_unit = 'B'
-
-        class NgSubclass3(statistics._Base):
-            """meter_unit is missing."""
-
-            meter_name = 'foo'
-            meter_type = sample.TYPE_GAUGE
-
-        self.assertRaises(TypeError, NgSubclass1)
-        self.assertRaises(TypeError, NgSubclass2)
-        self.assertRaises(TypeError, NgSubclass3)
-
-
-class TestBaseGetSamples(base.BaseTestCase):
-
-    def setUp(self):
-        super(TestBaseGetSamples, self).setUp()
-
-        class FakePollster(statistics._Base):
-            meter_name = 'foo'
-            meter_type = sample.TYPE_CUMULATIVE
-            meter_unit = 'bar'
-
-        self.pollster = FakePollster()
-
-    def tearDown(self):
-        statistics._Base.drivers = {}
-        super(TestBaseGetSamples, self).tearDown()
-
-    @staticmethod
-    def _setup_ext_mgr(**drivers):
-        statistics._Base.drivers = drivers
-
-    def _make_fake_driver(self, *return_values):
-        class FakeDriver(driver.Driver):
-
-            def __init__(self):
-                self.index = 0
-
-            def get_sample_data(self, meter_name, parse_url, params, cache):
-                if self.index >= len(return_values):
-                    yield None
-                retval = return_values[self.index]
-                self.index += 1
-                yield retval
-        return FakeDriver
-
-    @staticmethod
-    def _make_timestamps(count):
-        now = timeutils.utcnow()
-        return [(now + datetime.timedelta(seconds=i)).isoformat()
-                for i in range(count)]
-
-    def _get_samples(self, *resources):
-
-        return [v for v in self.pollster.get_samples(self, {}, resources)]
-
-    def _assert_sample(self, s, volume, resource_id, resource_metadata):
-        self.assertEqual('foo', s.name)
-        self.assertEqual(sample.TYPE_CUMULATIVE, s.type)
-        self.assertEqual('bar', s.unit)
-        self.assertEqual(volume, s.volume)
-        self.assertIsNone(s.user_id)
-        self.assertIsNone(s.project_id)
-        self.assertEqual(resource_id, s.resource_id)
-        self.assertEqual(resource_metadata, s.resource_metadata)
-
-    def test_get_samples_one_driver_one_resource(self):
-        fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},),
-                                             (2, 'b', None))
-
-        self._setup_ext_mgr(http=fake_driver())
-
-        samples = self._get_samples('http://foo')
-
-        self.assertEqual(1, len(samples))
-        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'})
-
-    def test_get_samples_one_driver_two_resource(self):
-        fake_driver = self._make_fake_driver((1, 'a', {'spam': 'egg'},),
-                                             (2, 'b', None),
-                                             (3, 'c', None))
-
-        self._setup_ext_mgr(http=fake_driver())
-
-        samples = self._get_samples('http://foo', 'http://bar')
-
-        self.assertEqual(2, len(samples))
-        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'})
-        self._assert_sample(samples[1], 2, 'b', {})
-
-    def test_get_samples_two_driver_one_resource(self):
-        fake_driver1 = self._make_fake_driver((1, 'a', {'spam': 'egg'},),
-                                              (2, 'b', None))
-
-        fake_driver2 = self._make_fake_driver((11, 'A', None),
-                                              (12, 'B', None))
-
-        self._setup_ext_mgr(http=fake_driver1(), https=fake_driver2())
-
-        samples = self._get_samples('http://foo')
-
-        self.assertEqual(1, len(samples))
-        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'})
-
-    def test_get_samples_multi_samples(self):
-        fake_driver = self._make_fake_driver([(1, 'a', {'spam': 'egg'},),
-                                              (2, 'b', None)])
-
-        self._setup_ext_mgr(http=fake_driver())
-
-        samples = self._get_samples('http://foo')
-
-        self.assertEqual(2, len(samples))
-        self._assert_sample(samples[0], 1, 'a', {'spam': 'egg'})
-        self._assert_sample(samples[1], 2, 'b', {})
-
-    def test_get_samples_return_none(self):
-        fake_driver = self._make_fake_driver(None)
-
-        self._setup_ext_mgr(http=fake_driver())
-
-        samples = self._get_samples('http://foo')
-
-        self.assertEqual(0, len(samples))
-
-    def test_get_samples_return_no_generator(self):
-        class NoneFakeDriver(driver.Driver):
-
-            def get_sample_data(self, meter_name, parse_url, params, cache):
-                return None
-
-        self._setup_ext_mgr(http=NoneFakeDriver())
-        samples = self._get_samples('http://foo')
-        self.assertFalse(samples)
diff --git a/ceilometer/tests/unit/network/statistics/test_switch.py b/ceilometer/tests/unit/network/statistics/test_switch.py
deleted file mode 100644
index c532a3f0..00000000
--- a/ceilometer/tests/unit/network/statistics/test_switch.py
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ceilometer.network.statistics import switch
-from ceilometer import sample
-from ceilometer.tests.unit.network import statistics
-
-
-class TestSwitchPollster(statistics._PollsterTestBase):
-
-    def test_table_pollster(self):
-        self._test_pollster(
-            switch.SWPollster,
-            'switch',
-            sample.TYPE_GAUGE,
-            'switch')
diff --git a/ceilometer/tests/unit/network/statistics/test_table.py b/ceilometer/tests/unit/network/statistics/test_table.py
deleted file mode 100644
index 533e2a61..00000000
--- a/ceilometer/tests/unit/network/statistics/test_table.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
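(Editor's note: TestBaseGetSamples above fixes the dispatch semantics: a driver is selected by the scheme of each resource URL, a None return or exhausted generator produces no sample, None metadata is normalized to {}, and a yielded list fans out into several samples. A sketch consistent with those cases; iter_sample_data is a hypothetical helper, not the real statistics._Base.get_samples:)

from six.moves.urllib import parse as urlparse


def iter_sample_data(pollster, resources, cache):
    # Pick a driver per resource by URL scheme and normalize its output.
    for resource in resources:
        parse_url = urlparse.urlparse(resource)
        drv = pollster.drivers.get(parse_url.scheme)
        if drv is None:
            continue
        data = drv.get_sample_data(pollster.meter_name, parse_url, {}, cache)
        for item in data or []:
            if item is None:
                continue
            # A yielded list fans out into several samples; a bare
            # (volume, resource_id, metadata) tuple is a single one.
            for volume, resource_id, metadata in (
                    item if isinstance(item, list) else [item]):
                yield volume, resource_id, metadata or {}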
- -from ceilometer.network.statistics import table -from ceilometer import sample -from ceilometer.tests.unit.network import statistics - - -class TestTablePollsters(statistics._PollsterTestBase): - - def test_table_pollster(self): - self._test_pollster( - table.TablePollster, - 'switch.table', - sample.TYPE_GAUGE, - 'table') - - def test_table_pollster_active_entries(self): - self._test_pollster( - table.TablePollsterActiveEntries, - 'switch.table.active.entries', - sample.TYPE_GAUGE, - 'entry') - - def test_table_pollster_lookup_packets(self): - self._test_pollster( - table.TablePollsterLookupPackets, - 'switch.table.lookup.packets', - sample.TYPE_GAUGE, - 'packet') - - def test_table_pollster_matched_packets(self): - self._test_pollster( - table.TablePollsterMatchedPackets, - 'switch.table.matched.packets', - sample.TYPE_GAUGE, - 'packet') diff --git a/ceilometer/tests/unit/network/test_floating_ip.py b/ceilometer/tests/unit/network/test_floating_ip.py deleted file mode 100644 index 5e8f7e72..00000000 --- a/ceilometer/tests/unit/network/test_floating_ip.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2016 Sungard Availability Services -# Copyright 2016 Red Hat -# All Rights Reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslotest import base -from oslotest import mockpatch - -from ceilometer.agent import manager -from ceilometer.agent import plugin_base -from ceilometer.network import floatingip -from ceilometer.network.services import discovery - - -class _BaseTestFloatingIPPollster(base.BaseTestCase): - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(_BaseTestFloatingIPPollster, self).setUp() - self.manager = manager.AgentManager() - plugin_base._get_keystone = mock.Mock() - - -class TestFloatingIPPollster(_BaseTestFloatingIPPollster): - - def setUp(self): - super(TestFloatingIPPollster, self).setUp() - self.pollster = floatingip.FloatingIPPollster() - fake_fip = self.fake_get_fip_service() - self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.' 
- 'fip_get_all', - return_value=fake_fip)) - - @staticmethod - def fake_get_fip_service(): - return [{'router_id': 'e24f8a37-1bb7-49e4-833c-049bb21986d2', - 'status': 'ACTIVE', - 'tenant_id': '54a00c50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'f41f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.6', - 'floating_ip_address': '65.79.162.11', - 'port_id': '93a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': '18ca27bf-72bc-40c8-9c13-414d564ea367'}, - {'router_id': 'astf8a37-1bb7-49e4-833c-049bb21986d2', - 'status': 'DOWN', - 'tenant_id': '34a00c50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'gh1f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.7', - 'floating_ip_address': '65.79.162.12', - 'port_id': '453a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': 'jkca27bf-72bc-40c8-9c13-414d564ea367'}, - {'router_id': 'e2478937-1bb7-49e4-833c-049bb21986d2', - 'status': 'error', - 'tenant_id': '54a0gggg50ee4c4396b2f8dc220a2bed57', - 'floating_network_id': - 'po1f399e-d63e-47c6-9a19-21c4e4fbbba0', - 'fixed_ip_address': '10.0.0.8', - 'floating_ip_address': '65.79.162.13', - 'port_id': '67a0d2c7-a397-444c-9d75-d2ac89b6f209', - 'id': '90ca27bf-72bc-40c8-9c13-414d564ea367'}] - - def test_fip_get_samples(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(3, len(samples)) - for field in self.pollster.FIELDS: - self.assertEqual(self.fake_get_fip_service()[0][field], - samples[0].resource_metadata[field]) - - def test_fip_volume(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(1, samples[0].volume) - - def test_get_fip_meter_names(self): - samples = list(self.pollster.get_samples( - self.manager, {}, - resources=self.fake_get_fip_service())) - self.assertEqual(set(['ip.floating']), - set([s.name for s in samples])) - - def test_fip_discovery(self): - discovered_fips = discovery.FloatingIPDiscovery().discover( - self.manager) - self.assertEqual(3, len(discovered_fips)) diff --git a/ceilometer/tests/unit/network/test_notifications.py b/ceilometer/tests/unit/network/test_notifications.py deleted file mode 100644 index 80eda675..00000000 --- a/ceilometer/tests/unit/network/test_notifications.py +++ /dev/null @@ -1,1480 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
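(Editor's note: the floating-IP assertions above amount to one 'ip.floating' sample of volume 1 per address, with the pollster's FIELDS copied into resource_metadata. A sketch with plain dicts standing in for sample.Sample; the FIELDS tuple is inferred from the fixture keys and is an assumption, as is the fip_samples helper:)

FIELDS = ('router_id', 'status', 'tenant_id', 'floating_network_id',
          'fixed_ip_address', 'floating_ip_address', 'port_id')


def fip_samples(fips):
    # One gauge-style record per floating IP, volume fixed at 1.
    for fip in fips:
        yield {
            'name': 'ip.floating',
            'volume': 1,
            'resource_id': fip['id'],
            'project_id': fip.get('tenant_id'),
            'resource_metadata': dict((f, fip.get(f)) for f in FIELDS),
        }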
-"""Tests for ceilometer.network.notifications -""" - -import mock - -from ceilometer.network import notifications -from ceilometer.tests import base as test - -NOTIFICATION_NETWORK_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - -NOTIFICATION_BULK_NETWORK_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-a2dfdefd-b773-4400-9d52-5e146e119950', - u'_context_read_deleted': u'no', - u'event_type': u'network.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 24: 56.335612', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'914eb601-9390-4a72-8629-f013a4c84467', - u'priority': 'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 24: 56.285975', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'networks': [{u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test2', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'7cbc7a66-bbd0-41fc-a186-81c3da5c9843', - u'provider: segmentation_id': None}, - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'test3', - u'provider: physical_network': None, - u'admin_state_up': True, - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'provider: network_type': u'local', - u'shared': False, - u'id': u'5a7cb86f-1638-4cc1-8dcc-8bbbc8c7510d', - u'provider: segmentation_id': None}] - } -} - -NOTIFICATION_SUBNET_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'timestamp': u'2012-09-27 14:11:27.426620', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'subnet': { - u'name': u'mysubnet', - u'enable_dhcp': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'dns_nameservers': [], - u'allocation_pools': [{u'start': u'192.168.42.2', - u'end': u'192.168.42.254'}], - u'host_routes': [], - u'ip_version': 4, - u'gateway_ip': u'192.168.42.1', - u'cidr': u'192.168.42.0/24', - u'id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:27.214490', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - 
u'message_id': u'd86dfc66-d3c3-4aea-b06d-bf37253e6116'} - -NOTIFICATION_BULK_SUBNET_CREATE = { - '_context_roles': [u'_member_', - u'heat_stack_owner', - u'admin'], - u'_context_request_id': u'req-b77e278a-0cce-4987-9f82-15957b234768', - u'_context_read_deleted': u'no', - u'event_type': u'subnet.create.end', - u'_context_user_name': u'admin', - u'_context_project_name': u'admin', - u'timestamp': u'2014-05-1510: 47: 08.133888', - u'_context_tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_tenant_name': u'admin', - u'_context_tenant': u'980ec4870033453ead65c0470a78b8a8', - u'message_id': u'c7e6f9fd-ead2-415f-8493-b95bedf72e43', - u'priority': u'info', - u'_context_is_admin': True, - u'_context_project_id': u'980ec4870033453ead65c0470a78b8a8', - u'_context_timestamp': u'2014-05-1510: 47: 07.970043', - u'_context_user': u'7520940056d54cceb25cbce888300bea', - u'_context_user_id': u'7520940056d54cceb25cbce888300bea', - u'publisher_id': u'network.devstack', - u'payload': { - u'subnets': [{u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.4.2', - u'end': u'10.0.4.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.4.1', - u'cidr': u'10.0.4.0/24', - u'id': u'14020d7b-6dd7-4349-bb8e-8f954c919022'}, - {u'name': u'', - u'enable_dhcp': True, - u'network_id': u'3ddfe60b-34b4-4e9d-9440-43c904b1c58e', - u'tenant_id': u'980ec4870033453ead65c0470a78b8a8', - u'dns_nameservers': [], - u'ipv6_ra_mode': None, - u'allocation_pools': [{u'start': u'10.0.5.2', - u'end': u'10.0.5.254'}], - u'host_routes': [], - u'ipv6_address_mode': None, - u'ip_version': 4, - u'gateway_ip': u'10.0.5.1', - u'cidr': u'10.0.5.0/24', - u'id': u'a080991b-a32a-4bf7-a558-96c4b77d075c'}] - } -} - -NOTIFICATION_PORT_CREATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'timestamp': u'2012-09-27 14:28:31.536370', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:28:31.438919', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'7135b8ab-e13c-4ac8-bc31-75e7f756622a'} - -NOTIFICATION_BULK_PORT_CREATE = { - u'_context_roles': [u'_member_', - u'SwiftOperator'], - u'_context_request_id': u'req-678be9ad-c399-475a-b3e8-8da0c06375aa', - u'_context_read_deleted': u'no', - u'event_type': u'port.create.end', - u'_context_project_name': u'demo', - u'timestamp': u'2014-05-0909: 19: 58.317548', - u'_context_tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_timestamp': u'2014-05-0909: 19: 58.160011', - u'_context_tenant': u'133087d90fc149528b501dd8b75ea965', - u'payload': { - u'ports': [{u'status': u'DOWN', - u'name': u'port--1501135095', - u'allowed_address_pairs': [], - 
u'admin_state_up': True, - u'network_id': u'acf63fdc-b43b-475d-8cca-9429b843d5e8', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 37: 10: 39', - u'fixed_ips': [], - u'id': u'296c2c9f-14e9-48da-979d-78b213454c59', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}, - {u'status': u'DOWN', - u'name': u'', - u'allowed_address_pairs': [], - u'admin_state_up': False, - u'network_id': u'0a8eea59-0146-425c-b470-e9ddfa99ec61', - u'tenant_id': u'133087d90fc149528b501dd8b75ea965', - u'binding: vnic_type': u'normal', - u'device_owner': u'', - u'mac_address': u'fa: 16: 3e: 8e: 6e: 53', - u'fixed_ips': [], - u'id': u'd8bb667f-5cd3-4eca-a984-268e25b1b7a5', - u'security_groups': [ - u'a06f7c9d-9e5a-46b0-9f6c-ce812aa2e5ff'], - u'device_id': u''}] - }, - u'_unique_id': u'60b1650f17fc4fa59492f447321fb26c', - u'_context_is_admin': False, - u'_context_project_id': u'133087d90fc149528b501dd8b75ea965', - u'_context_tenant_name': u'demo', - u'_context_user': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'_context_user_name': u'demo', - u'publisher_id': u'network.os-ci-test12', - u'message_id': u'04aa45e1-3c30-4c69-8638-e7ff8621e9bc', - u'_context_user_id': u'b1eb48f9c54741f4adc1b4ea512d400c', - u'priority': u'INFO' -} - -NOTIFICATION_PORT_UPDATE = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'port.update.end', - u'timestamp': u'2012-09-27 14:35:09.514052', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': { - u'port': { - u'status': u'ACTIVE', - u'name': u'bonjour', - u'admin_state_up': True, - u'network_id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be', - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'device_owner': u'', - u'mac_address': u'fa:16:3e:75:0c:49', - u'fixed_ips': [{ - u'subnet_id': u'1a3a170d-d7ce-4cc9-b1db-621da15a25f5', - u'ip_address': u'192.168.42.3'}], - u'id': u'9cdfeb92-9391-4da7-95a1-ca214831cfdb', - u'device_id': u''}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:35:09.447682', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'07b0a3a1-c0b5-40ab-a09c-28dee6bf48f4'} - - -NOTIFICATION_NETWORK_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'network.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'network': - {u'status': u'ACTIVE', - u'subnets': [], - u'name': u'abcedf', - u'router:external': False, - u'tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'admin_state_up': True, - u'shared': False, - u'id': u'7fd4eb2f-a38e-4c25-8490-71ca8800c9be'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_ROUTER_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'router.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'router': - {'status': u'ACTIVE', - 'external_gateway_info': - {'network_id': 
u'89d55642-4dec-43a4-a617-6cec051393b5'}, - 'name': u'router1', - 'admin_state_up': True, - 'tenant_id': u'bb04a2b769c94917b57ba49df7783cfd', - 'id': u'ab8bb3ed-df23-4ca0-8f03-b887abcd5c23'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_EXISTS = { - u'_context_roles': [u'anotherrole', - u'Member'], - u'_context_read_deleted': u'no', - u'event_type': u'floatingip.exists', - u'timestamp': u'2012-09-27 14:11:27.086575', - u'_context_tenant_id': u'82ed0c40ebe64d0bb3310027039c8ed2', - u'payload': {u'floatingip': - {'router_id': None, - 'tenant_id': u'6e5f9df9b3a249ab834f25fe1b1b81fd', - 'floating_network_id': - u'001400f7-1710-4245-98c3-39ba131cc39a', - 'fixed_ip_address': None, - 'floating_ip_address': u'172.24.4.227', - 'port_id': None, - 'id': u'2b7cc28c-6f78-4735-9246-257168405de6'}}, - u'priority': u'INFO', - u'_context_is_admin': False, - u'_context_timestamp': u'2012-09-27 14:11:26.924779', - u'_context_user_id': u'b44b7ce67fc84414a5c1660a92a1b862', - u'publisher_id': u'network.ubuntu-VirtualBox', - u'message_id': u'9e839576-cc47-4c60-a7d8-5743681213b1'} - - -NOTIFICATION_FLOATINGIP_UPDATE_START = { - '_context_roles': [u'_member_', - u'admin', - u'heat_stack_owner'], - '_context_request_id': u'req-bd5ed336-242f-4705-836e-8e8f3d0d1ced', - '_context_read_deleted': u'no', - 'event_type': u'floatingip.update.start', - '_context_user_name': u'admin', - '_context_project_name': u'admin', - 'timestamp': u'2014-05-3107: 19: 43.463101', - '_context_tenant_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_tenant_name': u'admin', - '_context_tenant': u'9fc714821a3747c8bc4e3a9bfbe82732', - 'message_id': u'0ab6d71f-ba0a-4501-86fe-6cc20521ef5a', - 'priority': 'info', - '_context_is_admin': True, - '_context_project_id': u'9fc714821a3747c8bc4e3a9bfbe82732', - '_context_timestamp': u'2014-05-3107: 19: 43.460767', - '_context_user': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - '_context_user_id': u'6ca7b13b33e4425cae0b85e2cf93d9a1', - 'publisher_id': u'network.devstack', - 'payload': { - u'id': u'64262b2a-8f5d-4ade-9405-0cbdd03c1555', - u'floatingip': { - u'fixed_ip_address': u'172.24.4.227', - u'port_id': u'8ab815c8-03cc-4b45-a673-79bdd0c258f2' - } - } -} - - -NOTIFICATION_POOL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.create.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": 
"a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": "6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": 
"1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - "destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - 
-NOTIFICATION_FIREWALL_POLICY_CREATE = {
-    "_context_roles": ["heat_stack_owner", "admin"],
-    "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e",
-    "event_type": "firewall_policy.create.end",
-    "timestamp": "2014-09-15 17:22:11.323644",
-    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "_unique_id": "f112a185e1d1424eba3a13df9e0f0277",
-    "_context_tenant_name": "demo",
-    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "payload": {
-        "firewall_policy": {"name": "my_policy",
-                            "firewall_rules": [],
-                            "tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-                            "audited": False,
-                            "shared": False,
-                            "id": "c46a1c15-0496-41c9-beff-9a309a25653e",
-                            "description": ""}},
-    "_context_project_name": "demo",
-    "_context_read_deleted": "no",
-    "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f",
-    "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb",
-    "priority": "INFO",
-    "_context_is_admin": True,
-    "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_timestamp": "2014-09-15 17:22:11.187163",
-    "_context_user_name": "admin",
-    "publisher_id": "network.ubuntu",
-    "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"}
-
-
-NOTIFICATION_VPNSERVICE_CREATE = {
-    "_context_roles": ["heat_stack_owner", "admin"],
-    "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e",
-    "event_type": "vpnservice.create.end",
-    "timestamp": "2014-09-15 17:22:11.323644",
-    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "_unique_id": "f112a185e1d1424eba3a13df9e0f0277",
-    "_context_tenant_name": "demo",
-    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "payload": {
-        "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672",
-                       "status": "ACTIVE",
-                       "name": "my_vpn",
-                       "admin_state_up": True,
-                       "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa",
-                       "tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-                       "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6",
-                       "description": ""}},
-    "_context_project_name": "demo",
-    "_context_read_deleted": "no",
-    "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f",
-    "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb",
-    "priority": "INFO",
-    "_context_is_admin": True,
-    "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_timestamp": "2014-09-15 17:22:11.187163",
-    "_context_user_name": "admin",
-    "publisher_id": "network.ubuntu",
-    "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"}
-
-
-NOTIFICATION_IPSEC_POLICY_CREATE = {
-    "_context_roles": ["heat_stack_owner", "admin"],
-    "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e",
-    "event_type": "ipsecpolicy.create.end",
-    "timestamp": "2014-09-15 17:22:11.323644",
-    "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-    "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "_unique_id": "f112a185e1d1424eba3a13df9e0f0277",
-    "_context_tenant_name": "demo",
-    "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1",
-    "payload": {
-        "ipsecpolicy": {"encapsulation_mode": "tunnel",
-                        "encryption_algorithm": "aes-128",
-                        "pfs": "group5",
-                        "lifetime": {
-                            "units": "seconds",
-                            "value": 3600},
-                        "name": "my_ipsec_policy",
-                        "transform_protocol": "esp",
-                        "tenant_id": "a820f2d6293b4a7587d1c582767f43fb",
-                        "id": "998d910d-4506-47c9-a160-47ec51ff53fc",
-                        "auth_algorithm": "sha1",
-                        "description": ""}},
-    "_context_project_name": "demo",
-    "_context_read_deleted": "no",
-    "_context_auth_token":
"e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_CREATE = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsec_site_connection": { - "status": "ACTIVE", - "psk": "test", - "initiator": "bi-directional", - "name": "my_ipsec_connection", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], - "mtu": 1500, - "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "dpd": {"action": "hold", - "interval": 30, - "timeout": 120}, - "route_mode": "static", - "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "peer_address": "10.0.0.1", - "peer_id": "10.0.0.254", - "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_POOL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": 
"req-10715057-7590-4529-8020-b994295ee6f4", - "event_type": "pool.update.end", - "timestamp": "2014-09-15 17:20:50.687649", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "ce255443233748ce9cc71b480974df28", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "pool": { - "status": "ACTIVE", - "lb_method": "ROUND_ROBIN", - "protocol": "HTTP", "description": "", - "health_monitors": [], - "members": [], - "status_description": None, - "id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "vip_id": None, - "name": "my_pool", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "health_monitors_status": [], - "provider": "haproxy"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:20:49.600299", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "0a5ed7a6-e516-4aed-9968-4ee9f1b65cc2"} - - -NOTIFICATION_VIP_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vip.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vip": { - "status": "ACTIVE", - "protocol": "HTTP", - "description": "", - "address": "10.0.0.2", - "protocol_port": 80, - "port_id": "2b5dd476-11da-4d46-9f1e-7a75436062f6", - "id": "87a5ce35-f278-47f3-8990-7f695f52f9bf", - "status_description": None, - "name": "my_vip", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "connection_limit": -1, - "pool_id": "6d726518-f3aa-4dd4-ac34-e156a35c0aff", - "session_persistence": {"type": "SOURCE_IP"}}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "3895ad11-98a3-4031-92af-f76e96736661"} - - -NOTIFICATION_HEALTH_MONITORS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": { - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "delay": 10, - "max_retries": 10, - "timeout": 10, - "pools": [], - "type": "PING", - "id": 
"6dea2d01-c3af-4696-9192-6c938f391f01"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_MEMBERS_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "member.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "member": {"admin_state_up": True, - "status": "ACTIVE", - "status_description": None, - "weight": 1, - "address": "10.0.0.3", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "protocol_port": 80, - "id": "5e32f960-63ae-4a93-bfa2-339aa83d82ce", - "pool_id": "6b73b9f8-d807-4553-87df-eb34cdd08070"}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_FIREWALL_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall": { - "status": "ACTIVE", - "name": "my_firewall", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "firewall_policy_id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "id": "e2d1155f-6bc4-4292-9cfa-ea91af4b38c8", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_RULE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_rule.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - 
"firewall_rule": { - "protocol": "tcp", - "description": "", - "source_port": 80, - "source_ip_address": '192.168.255.10', - "destination_ip_address": '10.10.10.1', - "firewall_policy_id": '', - "position": None, - "destination_port": 80, - "id": "53b7c0d3-cb87-4069-9e29-1e866583cc8c", - "name": "rule_01", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "enabled": True, - "action": "allow", - "ip_version": 4, - "shared": False}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_FIREWALL_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "firewall_policy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "firewall_policy": {"name": "my_policy", - "firewall_rules": [], - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "audited": False, - "shared": False, - "id": "c46a1c15-0496-41c9-beff-9a309a25653e", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "fdffeca1-2b5a-4dc9-b8ae-87c482a83e0d"} - - -NOTIFICATION_VPNSERVICE_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "vpnservice.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "vpnservice": {"router_id": "75871c53-e722-4b21-93ed-20cb40b6b672", - "status": "ACTIVE", - "name": "my_vpn", - "admin_state_up": True, - "subnet_id": "afaf251b-2ec3-42ac-9fa9-82a4195724fa", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -NOTIFICATION_IPSEC_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": 
"req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsecpolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsecpolicy": {"encapsulation_mode": "tunnel", - "encryption_algorithm": "aes-128", - "pfs": "group5", - "lifetime": { - "units": "seconds", - "value": 3600}, - "name": "my_ipsec_polixy", - "transform_protocol": "esp", - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IKE_POLICY_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ikepolicy.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ikepolicy": {"encryption_algorithm": "aes-128", - "pfs": "group5", - "name": "my_ike_policy", - "phase1_negotiation_mode": "main", - "lifetime": {"units": "seconds", - "value": 3600}, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ike_version": "v1", - "id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "auth_algorithm": "sha1", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - - -NOTIFICATION_IPSEC_SITE_CONN_UPDATE = { - "_context_roles": ["admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "ipsec_site_connection.update.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "ipsec_site_connection": { - "status": "ACTIVE", - "psk": "test", - "initiator": "bi-directional", - "name": "my_ipsec_connection", - "admin_state_up": True, - "tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "ipsecpolicy_id": "998d910d-4506-47c9-a160-47ec51ff53fc", - "auth_mode": "psk", "peer_cidrs": ["192.168.255.0/24"], - "mtu": 1500, - "ikepolicy_id": "11cef94e-3f6a-4b65-8058-7deb1838633a", - "dpd": {"action": "hold", - "interval": 30, 
- "timeout": 120}, - "route_mode": "static", - "vpnservice_id": "270c40cc-28d5-4a7e-83da-cc33088ee5d6", - "peer_address": "10.0.0.1", - "peer_id": "10.0.0.254", - "id": "06f3c1ec-2e01-4ad6-9c98-4252751fc60a", - "description": ""}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "4c0e6ecb-2e40-4975-aee2-d88045c747bf"} - -NOTIFICATION_EMPTY_PAYLOAD = { - "_context_roles": ["heat_stack_owner", "admin"], - "_context_request_id": "req-e56a8a5e-5d42-43e8-9677-2d36e6e17d5e", - "event_type": "health_monitor.create.end", - "timestamp": "2014-09-15 17:22:11.323644", - "_context_tenant_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_user": "1c1f7c80efc24a16b835ae1c0802d0a1", - "_unique_id": "f112a185e1d1424eba3a13df9e0f0277", - "_context_tenant_name": "demo", - "_context_user_id": "1c1f7c80efc24a16b835ae1c0802d0a1", - "payload": { - "health_monitor": {}}, - "_context_project_name": "demo", - "_context_read_deleted": "no", - "_context_auth_token": "e6daf56d7d1787e1fbefff0ecf29703f", - "_context_tenant": "a820f2d6293b4a7587d1c582767f43fb", - "priority": "INFO", - "_context_is_admin": True, - "_context_project_id": "a820f2d6293b4a7587d1c582767f43fb", - "_context_timestamp": "2014-09-15 17:22:11.187163", - "_context_user_name": "admin", - "publisher_id": "network.ubuntu", - "message_id": "65067e3f-830d-4fbb-87e2-f0e51fda83d2"} - - -class TestNotifications(test.BaseTestCase): - def test_network_create(self): - v = notifications.Network(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_NETWORK_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.create", samples[1].name) - - def test_bulk_network_create(self): - v = notifications.Network(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_BULK_NETWORK_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("network", samples[0].name) - self.assertEqual("network.create", samples[1].name) - self.assertEqual("network", samples[2].name) - self.assertEqual("network.create", samples[3].name) - - def test_subnet_create(self): - v = notifications.Subnet(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_SUBNET_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("subnet.create", samples[1].name) - - def test_bulk_subnet_create(self): - v = notifications.Subnet(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_BULK_SUBNET_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("subnet", samples[0].name) - self.assertEqual("subnet.create", samples[1].name) - self.assertEqual("subnet", samples[2].name) - self.assertEqual("subnet.create", samples[3].name) - - def test_port_create(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_PORT_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("port.create", samples[1].name) - - def test_bulk_port_create(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_BULK_PORT_CREATE)) - self.assertEqual(4, len(samples)) - self.assertEqual("port", samples[0].name) - self.assertEqual("port.create", samples[1].name) - 
self.assertEqual("port", samples[2].name) - self.assertEqual("port.create", samples[3].name) - - def test_port_update(self): - v = notifications.Port(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_PORT_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("port.update", samples[1].name) - - def test_network_exists(self): - v = notifications.Network(mock.Mock()) - samples = v.process_notification(NOTIFICATION_NETWORK_EXISTS) - self.assertEqual(1, len(list(samples))) - - def test_router_exists(self): - v = notifications.Router(mock.Mock()) - samples = v.process_notification(NOTIFICATION_ROUTER_EXISTS) - self.assertEqual(1, len(list(samples))) - - def test_floatingip_exists(self): - v = notifications.FloatingIP(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FLOATINGIP_EXISTS)) - self.assertEqual(1, len(samples)) - self.assertEqual("ip.floating", samples[0].name) - - def test_floatingip_update(self): - v = notifications.FloatingIP(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FLOATINGIP_UPDATE_START)) - self.assertEqual(len(samples), 2) - self.assertEqual("ip.floating", samples[0].name) - - def test_pool_create(self): - v = notifications.Pool(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_POOL_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.pool", samples[0].name) - - def test_vip_create(self): - v = notifications.Vip(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VIP_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.vip", samples[0].name) - - def test_member_create(self): - v = notifications.Member(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_MEMBERS_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.member", samples[0].name) - - def test_health_monitor_create(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_HEALTH_MONITORS_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.health_monitor", samples[0].name) - - def test_firewall_create(self): - v = notifications.Firewall(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FIREWALL_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall", samples[0].name) - - def test_vpnservice_create(self): - v = notifications.VPNService(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn", samples[0].name) - - def test_ipsec_connection_create(self): - v = notifications.IPSecSiteConnection(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_SITE_CONN_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.connections", samples[0].name) - - def test_firewall_policy_create(self): - v = notifications.FirewallPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.policy", samples[0].name) - - def test_firewall_rule_create(self): - v = notifications.FirewallRule(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_RULE_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.rule", samples[0].name) - - def test_ipsec_policy_create(self): - v = 
notifications.IPSecPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) - - def test_ike_policy_create(self): - v = notifications.IKEPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IKE_POLICY_CREATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) - - def test_pool_update(self): - v = notifications.Pool(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_POOL_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.pool", samples[0].name) - - def test_vip_update(self): - v = notifications.Vip(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VIP_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.vip", samples[0].name) - - def test_member_update(self): - v = notifications.Member(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_MEMBERS_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.member", samples[0].name) - - def test_health_monitor_update(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_HEALTH_MONITORS_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.lb.health_monitor", samples[0].name) - - def test_firewall_update(self): - v = notifications.Firewall(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_FIREWALL_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall", samples[0].name) - - def test_vpnservice_update(self): - v = notifications.VPNService(mock.Mock()) - samples = list(v.process_notification(NOTIFICATION_VPNSERVICE_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn", samples[0].name) - - def test_ipsec_connection_update(self): - v = notifications.IPSecSiteConnection(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_SITE_CONN_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.connections", samples[0].name) - - def test_firewall_policy_update(self): - v = notifications.FirewallPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.policy", samples[0].name) - - def test_firewall_rule_update(self): - v = notifications.FirewallRule(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_FIREWALL_RULE_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.firewall.rule", samples[0].name) - - def test_ipsec_policy_update(self): - v = notifications.IPSecPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IPSEC_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ipsecpolicy", samples[0].name) - - def test_ike_policy_update(self): - v = notifications.IKEPolicy(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_IKE_POLICY_UPDATE)) - self.assertEqual(2, len(samples)) - self.assertEqual("network.services.vpn.ikepolicy", samples[0].name) - - def test_empty_event_payload(self): - v = notifications.HealthMonitor(mock.Mock()) - samples = list(v.process_notification( - NOTIFICATION_EMPTY_PAYLOAD)) - self.assertEqual(0, len(samples)) - 
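# The sample-count assertions above encode a simple contract: for each
# resource in a create/update payload the handler yields a gauge sample
# named after the resource, followed by a '<resource>.<action>' sample,
# while '*.exists' events yield only the gauge. A minimal stand-in
# showing that ordering (illustrative only; the real Sample type lives
# in ceilometer.sample):

import collections

Sample = collections.namedtuple('Sample', 'name type unit volume resource_id')


def network_samples(notification):
    payload = notification['payload']
    networks = payload.get('networks') or [payload['network']]
    action = notification['event_type'].split('.')[1]  # 'create', 'exists', ...
    for net in networks:
        yield Sample('network', 'gauge', 'network', 1, net['id'])
        if action != 'exists':
            yield Sample('network.' + action, 'delta', 'network', 1, net['id'])

# list(network_samples(NOTIFICATION_BULK_NETWORK_CREATE)) produces four
# samples in exactly the order test_bulk_network_create checks.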
- -class TestEventTypes(test.BaseTestCase): - - def test_network(self): - v = notifications.Network(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_subnet(self): - v = notifications.Subnet(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_port(self): - v = notifications.Port(mock.Mock()) - events = v.event_types - self.assertIsNotEmpty(events) - - def test_router(self): - self.assertTrue(notifications.Router(mock.Mock()).event_types) - - def test_floatingip(self): - self.assertTrue(notifications.FloatingIP(mock.Mock()).event_types) - - def test_pool(self): - self.assertTrue(notifications.Pool(mock.Mock()).event_types) - - def test_vip(self): - self.assertTrue(notifications.Vip(mock.Mock()).event_types) - - def test_member(self): - self.assertTrue(notifications.Member(mock.Mock()).event_types) - - def test_health_monitor(self): - self.assertTrue(notifications.HealthMonitor(mock.Mock()).event_types) - - def test_firewall(self): - self.assertTrue(notifications.Firewall(mock.Mock()).event_types) - - def test_vpnservice(self): - self.assertTrue(notifications.VPNService(mock.Mock()).event_types) - - def test_ipsec_connection(self): - self.assertTrue(notifications.IPSecSiteConnection( - mock.Mock()).event_types) - - def test_firewall_policy(self): - self.assertTrue(notifications.FirewallPolicy(mock.Mock()).event_types) - - def test_firewall_rule(self): - self.assertTrue(notifications.FirewallRule(mock.Mock()).event_types) - - def test_ipsec_policy(self): - self.assertTrue(notifications.IPSecPolicy(mock.Mock()).event_types) - - def test_ike_policy(self): - self.assertTrue(notifications.IKEPolicy(mock.Mock()).event_types) diff --git a/ceilometer/tests/unit/objectstore/__init__.py b/ceilometer/tests/unit/objectstore/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/objectstore/test_rgw.py b/ceilometer/tests/unit/objectstore/test_rgw.py deleted file mode 100644 index 5e0600d5..00000000 --- a/ceilometer/tests/unit/objectstore/test_rgw.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
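# The pollster tests in the deleted file below are organised around a
# cache dict shared across pollsters for one polling cycle: the first
# pollster to run stores the per-tenant account data under its
# CACHE_KEY_METHOD, and later pollsters reuse it instead of hitting the
# radosgw admin API again. A minimal sketch of that pattern (the client
# object and its get_stats() call are hypothetical):

def iter_accounts(client, cache, tenants, cache_key):
    if cache_key not in cache:
        cache[cache_key] = [(t.id, client.get_stats(t.id)) for t in tenants]
    return iter(cache[cache_key])

# test_iter_accounts_no_cache and test_iter_accounts_cached below exercise
# exactly these two branches: a miss populates cache[cache_key], while a
# hit must not trigger another API call.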
-
-import collections
-
-from keystoneauth1 import exceptions
-import mock
-from oslotest import base
-from oslotest import mockpatch
-import testscenarios.testcase
-
-from ceilometer.agent import manager
-from ceilometer.objectstore import rgw
-from ceilometer.objectstore.rgw_client import RGWAdminClient as rgw_client
-
-bucket_list1 = [rgw_client.Bucket('somefoo1', 10, 7)]
-bucket_list2 = [rgw_client.Bucket('somefoo2', 2, 9)]
-bucket_list3 = [rgw_client.Bucket('unlisted', 100, 100)]
-
-GET_BUCKETS = [('tenant-000', {'num_buckets': 2, 'size': 1042,
-                               'num_objects': 1001, 'buckets': bucket_list1}),
-               ('tenant-001', {'num_buckets': 2, 'size': 1042,
-                               'num_objects': 1001, 'buckets': bucket_list2}),
-               ('tenant-002-ignored', {'num_buckets': 2, 'size': 1042,
-                                       'num_objects': 1001,
-                                       'buckets': bucket_list3})]
-
-GET_USAGE = [('tenant-000', 10),
-             ('tenant-001', 11),
-             ('tenant-002-ignored', 12)]
-
-Tenant = collections.namedtuple('Tenant', 'id')
-ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')]
-
-
-class TestManager(manager.AgentManager):
-
-    def __init__(self):
-        super(TestManager, self).__init__()
-        self._keystone = mock.Mock()
-        self._catalog = (self._keystone.session.auth.get_access.
-                         return_value.service_catalog)
-        self._catalog.url_for.return_value = 'http://foobar/endpoint'
-
-
-class TestRgwPollster(testscenarios.testcase.WithScenarios,
-                      base.BaseTestCase):
-
-    # Define scenarios to run all of the tests against all of the
-    # pollsters.
-    scenarios = [
-        ('radosgw.objects',
-         {'factory': rgw.ObjectsPollster}),
-        ('radosgw.objects.size',
-         {'factory': rgw.ObjectsSizePollster}),
-        ('radosgw.objects.containers',
-         {'factory': rgw.ObjectsContainersPollster}),
-        ('radosgw.containers.objects',
-         {'factory': rgw.ContainersObjectsPollster}),
-        ('radosgw.containers.objects.size',
-         {'factory': rgw.ContainersSizePollster}),
-        ('radosgw.api.request',
-         {'factory': rgw.UsagePollster}),
-    ]
-
-    @staticmethod
-    def fake_ks_service_catalog_url_for(*args, **kwargs):
-        raise exceptions.EndpointNotFound("Fake keystone exception")
-
-    def fake_iter_accounts(self, ksclient, cache, tenants):
-        tenant_ids = [t.id for t in tenants]
-        for i in self.ACCOUNTS:
-            if i[0] in tenant_ids:
-                yield i
-
-    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
-    def setUp(self):
-        super(TestRgwPollster, self).setUp()
-        self.pollster = self.factory()
-        self.manager = TestManager()
-
-        if self.pollster.CACHE_KEY_METHOD == 'rgw.get_bucket':
-            self.ACCOUNTS = GET_BUCKETS
-        else:
-            self.ACCOUNTS = GET_USAGE
-
-    def tearDown(self):
-        super(TestRgwPollster, self).tearDown()
-        rgw._Base._ENDPOINT = None
-
-    def test_iter_accounts_no_cache(self):
-        cache = {}
-        with mockpatch.PatchObject(self.factory, '_get_account_info',
-                                   return_value=[]):
-            data = list(self.pollster._iter_accounts(mock.Mock(), cache,
-                                                     ASSIGNED_TENANTS))
-
-        self.assertIn(self.pollster.CACHE_KEY_METHOD, cache)
-        self.assertEqual([], data)
-
-    def test_iter_accounts_cached(self):
-        # Verify that if a method has already been called, _iter_accounts
-        # uses the cached version and doesn't call rgw_client.
- mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = 'get_%s' % self.pollster.METHOD - - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'http://127.0.0.1:8000/admin' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, new=mock_method): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(t.id) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - mock_url_for = mock.MagicMock() - mock_url_for.return_value = '/endpoint' - api_method = 'get_%s' % self.pollster.METHOD - with mockpatch.PatchObject(rgw_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager._catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff --git a/ceilometer/tests/unit/objectstore/test_rgw_client.py b/ceilometer/tests/unit/objectstore/test_rgw_client.py deleted file mode 100644 index f2d1ef60..00000000 --- a/ceilometer/tests/unit/objectstore/test_rgw_client.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2015 Reliance Jio Infocomm Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
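# The client tests in the deleted file below verify that get_bucket()
# reduces the per-bucket statistics returned by the radosgw admin API
# (parsed into RGW_ADMIN_BUCKETS below) to one summary per tenant. A
# minimal sketch of that aggregation, assuming the JSON layout shown
# below (illustrative, not the deleted rgw_client implementation):

import collections

Bucket = collections.namedtuple('Bucket', 'name num_objects size')


def aggregate_bucket_stats(bucket_entries):
    buckets = []
    for entry in bucket_entries:
        stats = entry['usage'].get('rgw.main', {})
        buckets.append(Bucket(entry['bucket'],
                              stats.get('num_objects', 0),
                              stats.get('size_kb', 0)))
    return {'num_buckets': len(buckets),
            'size': sum(b.size for b in buckets),
            'num_objects': sum(b.num_objects for b in buckets),
            'buckets': buckets}

# Applied to the two entries in RGW_ADMIN_BUCKETS this yields
# num_buckets=2, size=1042 (1000 + 42 kB) and num_objects=1001, matching
# the expectation in test_get_buckets.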
- -import json - -import mock -from oslotest import base - -from ceilometer.objectstore.rgw_client import RGWAdminAPIFailed -from ceilometer.objectstore.rgw_client import RGWAdminClient - - -RGW_ADMIN_BUCKETS = ''' -[ - { - "max_marker": "", - "ver": 2001, - "usage": { - "rgw.main": { - "size_kb_actual": 16000, - "num_objects": 1000, - "size_kb": 1000 - } - }, - "bucket": "somefoo", - "owner": "admin", - "master_ver": 0, - "mtime": 1420176126, - "marker": "default.4126.1", - "bucket_quota": { - "max_objects": -1, - "enabled": false, - "max_size_kb": -1 - }, - "id": "default.4126.1", - "pool": ".rgw.buckets", - "index_pool": ".rgw.buckets.index" - }, - { - "max_marker": "", - "ver": 3, - "usage": { - "rgw.main": { - "size_kb_actual": 43, - "num_objects": 1, - "size_kb": 42 - } - }, - "bucket": "somefoo31", - "owner": "admin", - "master_ver": 0, - "mtime": 1420176134, - "marker": "default.4126.5", - "bucket_quota": { - "max_objects": -1, - "enabled": false, - "max_size_kb": -1 - }, - "id": "default.4126.5", - "pool": ".rgw.buckets", - "index_pool": ".rgw.buckets.index" - } -]''' - -RGW_ADMIN_USAGE = ''' -{ "entries": [ - { "owner": "5f7fe2d5352e466f948f49341e33d107", - "buckets": [ - { "bucket": "", - "time": "2015-01-23 09:00:00.000000Z", - "epoch": 1422003600, - "categories": [ - { "category": "list_buckets", - "bytes_sent": 46, - "bytes_received": 0, - "ops": 3, - "successful_ops": 3}, - { "category": "stat_account", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}]}, - { "bucket": "foodsgh", - "time": "2015-01-23 09:00:00.000000Z", - "epoch": 1422003600, - "categories": [ - { "category": "create_bucket", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}, - { "category": "get_obj", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 0}, - { "category": "put_obj", - "bytes_sent": 0, - "bytes_received": 238, - "ops": 1, - "successful_ops": 1}]}]}], - "summary": [ - { "user": "5f7fe2d5352e466f948f49341e33d107", - "categories": [ - { "category": "create_bucket", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}, - { "category": "get_obj", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 0}, - { "category": "list_buckets", - "bytes_sent": 46, - "bytes_received": 0, - "ops": 3, - "successful_ops": 3}, - { "category": "put_obj", - "bytes_sent": 0, - "bytes_received": 238, - "ops": 1, - "successful_ops": 1}, - { "category": "stat_account", - "bytes_sent": 0, - "bytes_received": 0, - "ops": 1, - "successful_ops": 1}], - "total": { "bytes_sent": 46, - "bytes_received": 238, - "ops": 7, - "successful_ops": 6}}]} -''' - -buckets_json = json.loads(RGW_ADMIN_BUCKETS) -usage_json = json.loads(RGW_ADMIN_USAGE) - - -class TestRGWAdminClient(base.BaseTestCase): - - def setUp(self): - super(TestRGWAdminClient, self).setUp() - self.client = RGWAdminClient('http://127.0.0.1:8080/admin', - 'abcde', 'secret') - self.get_resp = mock.MagicMock() - self.get = mock.patch('requests.get', - return_value=self.get_resp).start() - - def test_make_request_exception(self): - self.get_resp.status_code = 403 - self.assertRaises(RGWAdminAPIFailed, self.client._make_request, - *('foo', {})) - - def test_make_request(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = buckets_json - actual = self.client._make_request('foo', []) - self.assertEqual(buckets_json, actual) - - def test_get_buckets(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = 
buckets_json - actual = self.client.get_bucket('foo') - bucket_list = [RGWAdminClient.Bucket('somefoo', 1000, 1000), - RGWAdminClient.Bucket('somefoo31', 1, 42), - ] - expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, - 'buckets': bucket_list} - self.assertEqual(expected, actual) - - def test_get_usage(self): - self.get_resp.status_code = 200 - self.get_resp.json.return_value = usage_json - actual = self.client.get_usage('foo') - expected = 7 - self.assertEqual(expected, actual) diff --git a/ceilometer/tests/unit/objectstore/test_swift.py b/ceilometer/tests/unit/objectstore/test_swift.py deleted file mode 100644 index 318dd874..00000000 --- a/ceilometer/tests/unit/objectstore/test_swift.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2012 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections - -from keystoneauth1 import exceptions -import mock -from oslotest import base -from oslotest import mockpatch -from swiftclient import client as swift_client -import testscenarios.testcase - -from ceilometer.agent import manager -from ceilometer.objectstore import swift - -HEAD_ACCOUNTS = [('tenant-000', {'x-account-object-count': 12, - 'x-account-bytes-used': 321321321, - 'x-account-container-count': 7, - }), - ('tenant-001', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - }), - ('tenant-002-ignored', {'x-account-object-count': 34, - 'x-account-bytes-used': 9898989898, - 'x-account-container-count': 17, - })] - -GET_ACCOUNTS = [('tenant-000', ({'x-account-object-count': 10, - 'x-account-bytes-used': 123123, - 'x-account-container-count': 2, - }, - [{'count': 10, - 'bytes': 123123, - 'name': 'my_container'}, - {'count': 0, - 'bytes': 0, - 'name': 'new_container' - }])), - ('tenant-001', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, [])), - ('tenant-002-ignored', ({'x-account-object-count': 0, - 'x-account-bytes-used': 0, - 'x-account-container-count': 0, - }, []))] - -Tenant = collections.namedtuple('Tenant', 'id') -ASSIGNED_TENANTS = [Tenant('tenant-000'), Tenant('tenant-001')] - - -class TestManager(manager.AgentManager): - - def __init__(self): - super(TestManager, self).__init__() - self._keystone = mock.MagicMock() - self._keystone_last_exception = None - self._service_catalog = (self._keystone.session.auth. - get_access.return_value.service_catalog) - self._auth_token = (self._keystone.session.auth. - get_access.return_value.auth_token) - - -class TestSwiftPollster(testscenarios.testcase.WithScenarios, - base.BaseTestCase): - - # Define scenarios to run all of the tests against all of the - # pollsters. 
- scenarios = [ - ('storage.objects', - {'factory': swift.ObjectsPollster}), - ('storage.objects.size', - {'factory': swift.ObjectsSizePollster}), - ('storage.objects.containers', - {'factory': swift.ObjectsContainersPollster}), - ('storage.containers.objects', - {'factory': swift.ContainersObjectsPollster}), - ('storage.containers.objects.size', - {'factory': swift.ContainersSizePollster}), - ] - - @staticmethod - def fake_ks_service_catalog_url_for(*args, **kwargs): - raise exceptions.EndpointNotFound("Fake keystone exception") - - def fake_iter_accounts(self, ksclient, cache, tenants): - tenant_ids = [t.id for t in tenants] - for i in self.ACCOUNTS: - if i[0] in tenant_ids: - yield i - - @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock()) - def setUp(self): - super(TestSwiftPollster, self).setUp() - self.pollster = self.factory() - self.manager = TestManager() - - if self.pollster.CACHE_KEY_METHOD == 'swift.head_account': - self.ACCOUNTS = HEAD_ACCOUNTS - else: - self.ACCOUNTS = GET_ACCOUNTS - - def tearDown(self): - super(TestSwiftPollster, self).tearDown() - swift._Base._ENDPOINT = None - - def test_iter_accounts_no_cache(self): - cache = {} - with mockpatch.PatchObject(self.factory, '_get_account_info', - return_value=[]): - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - - self.assertIn(self.pollster.CACHE_KEY_METHOD, cache) - self.assertEqual([], data) - - def test_iter_accounts_cached(self): - # Verify that if a method has already been called, _iter_accounts - # uses the cached version and doesn't call swiftclient. - mock_method = mock.Mock() - mock_method.side_effect = AssertionError( - 'should not be called', - ) - - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, new=mock_method): - with mockpatch.PatchObject(self.factory, '_neaten_url'): - cache = {self.pollster.CACHE_KEY_METHOD: [self.ACCOUNTS[0]]} - data = list(self.pollster._iter_accounts(mock.Mock(), cache, - ASSIGNED_TENANTS)) - self.assertEqual([self.ACCOUNTS[0]], data) - - def test_neaten_url(self): - test_endpoints = ['http://127.0.0.1:8080', - 'http://127.0.0.1:8080/swift'] - test_tenant_id = 'a7fd1695fa154486a647e44aa99a1b9b' - for test_endpoint in test_endpoints: - standard_url = test_endpoint + '/v1/AUTH_' + test_tenant_id - - url = swift._Base._neaten_url(test_endpoint, test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/', test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(test_endpoint + '/v1', - test_tenant_id) - self.assertEqual(standard_url, url) - url = swift._Base._neaten_url(standard_url, test_tenant_id) - self.assertEqual(standard_url, url) - - def test_metering(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(2, len(samples), self.pollster.__class__) - - def test_get_meter_names(self): - with mockpatch.PatchObject(self.factory, '_iter_accounts', - side_effect=self.fake_iter_accounts): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(set([samples[0].name]), - set([s.name for s in samples])) - - def test_only_poll_assigned(self): - mock_method = mock.MagicMock() - endpoint = 'end://point/' - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, 
new=mock_method): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - return_value=endpoint): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - expected = [mock.call(self.pollster._neaten_url(endpoint, t.id), - self.manager._auth_token) - for t in ASSIGNED_TENANTS] - self.assertEqual(expected, mock_method.call_args_list) - - def test_get_endpoint_only_once(self): - endpoint = 'end://point/' - mock_url_for = mock.MagicMock(return_value=endpoint) - api_method = '%s_account' % self.pollster.METHOD - with mockpatch.PatchObject(swift_client, api_method, - new=mock.MagicMock()): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - new=mock_url_for): - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - self.assertEqual(1, mock_url_for.call_count) - - def test_endpoint_notfound(self): - with mockpatch.PatchObject( - self.manager._service_catalog, 'url_for', - side_effect=self.fake_ks_service_catalog_url_for): - samples = list(self.pollster.get_samples(self.manager, {}, - ASSIGNED_TENANTS)) - - self.assertEqual(0, len(samples)) diff --git a/ceilometer/tests/unit/publisher/__init__.py b/ceilometer/tests/unit/publisher/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/publisher/test_file.py b/ceilometer/tests/unit/publisher/test_file.py deleted file mode 100644 index 9857b1a3..00000000 --- a/ceilometer/tests/unit/publisher/test_file.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
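
test_neaten_url above fixes the normalization contract for Swift endpoints: a bare endpoint, an endpoint with a trailing slash, an endpoint ending in /v1, and an already-qualified URL must all map to <endpoint>/v1/AUTH_<tenant_id>. A sketch consistent with those four cases (an illustration, not the deleted implementation):

    def neaten_url(endpoint, tenant_id):
        """Normalize a Swift endpoint to <endpoint>/v1/AUTH_<tenant_id>."""
        path = 'v1/AUTH_' + tenant_id
        endpoint = endpoint.rstrip('/')
        if endpoint.endswith('/' + path):
            return endpoint  # already fully qualified
        if endpoint.endswith('/v1'):
            return endpoint + '/AUTH_' + tenant_id
        return endpoint + '/' + path

    # e.g. neaten_url('http://127.0.0.1:8080/', 'abc')
    #   -> 'http://127.0.0.1:8080/v1/AUTH_abc'
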
-"""Tests for ceilometer/publisher/file.py -""" - -import datetime -import logging.handlers -import os -import tempfile - -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import file -from ceilometer import sample - - -class TestFilePublisher(base.BaseTestCase): - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_file_publisher_maxbytes(self): - # Test valid configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file' % tempdir - parsed_url = netutils.urlsplit('file://%s?max_bytes=50&backup_count=3' - % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([50, name, 3], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # The rotating file gets created since only allow 50 bytes. - self.assertTrue(os.path.exists('%s.1' % name)) - - def test_file_publisher(self): - # Test missing max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - name = '%s/log_file_plain' % tempdir - parsed_url = netutils.urlsplit('file://%s' % name) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - handler = publisher.publisher_logger.handlers[0] - self.assertIsInstance(handler, - logging.handlers.RotatingFileHandler) - self.assertEqual([0, name, 0], [handler.maxBytes, - handler.baseFilename, - handler.backupCount]) - # Test the content is corrected saved in the file - self.assertTrue(os.path.exists(name)) - with open(name, 'r') as f: - content = f.read() - for sample_item in self.test_data: - self.assertIn(sample_item.id, content) - self.assertIn(sample_item.timestamp, content) - - def test_file_publisher_invalid(self): - # Test invalid max bytes, backup count configurations - tempdir = tempfile.mkdtemp() - parsed_url = netutils.urlsplit( - 'file://%s/log_file_bad' - '?max_bytes=yus&backup_count=5y' % tempdir) - publisher = file.FilePublisher(parsed_url) - publisher.publish_samples(self.test_data) - - self.assertIsNone(publisher.publisher_logger) diff --git a/ceilometer/tests/unit/publisher/test_http.py b/ceilometer/tests/unit/publisher/test_http.py deleted file mode 100644 index 78ceb987..00000000 --- a/ceilometer/tests/unit/publisher/test_http.py +++ /dev/null @@ -1,170 +0,0 @@ -# -# Copyright 2016 IBM -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/http.py -""" - -import datetime -import mock -from oslotest import base -from requests import Session -from six.moves.urllib import parse as urlparse -import uuid - -from ceilometer.event.storage import models as event -from ceilometer.publisher import http -from ceilometer import sample - - -class TestHttpPublisher(base.BaseTestCase): - - resource_id = str(uuid.uuid4()) - - sample_data = [ - sample.Sample( - name='alpha', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='beta', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='gamma', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id=resource_id, - timestamp=datetime.datetime.now().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - event_data = [event.Event( - message_id=str(uuid.uuid4()), event_type='event_%d' % i, - generated=datetime.datetime.utcnow().isoformat(), - traits=[], raw={'payload': {'some': 'aa'}}) for i in range(0, 2)] - - empty_event_data = [event.Event( - message_id=str(uuid.uuid4()), event_type='event_%d' % i, - generated=datetime.datetime.utcnow().isoformat(), - traits=[], raw={'payload': {}}) for i in range(0, 2)] - - def test_http_publisher_config(self): - """Test publisher config parameters.""" - # invalid hostname, the given url, results in an empty hostname - parsed_url = urlparse.urlparse('http:/aaa.bb/path') - self.assertRaises(ValueError, http.HttpPublisher, - parsed_url) - - # invalid port - parsed_url = urlparse.urlparse('http://aaa:bb/path') - self.assertRaises(ValueError, http.HttpPublisher, - parsed_url) - - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - # By default, timeout and retry_count should be set to 1000 and 2 - # respectively - self.assertEqual(1, publisher.timeout) - self.assertEqual(2, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' - 'timeout=19&max_retries=4') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(19, publisher.timeout) - self.assertEqual(4, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' - 'timeout=19') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(19, publisher.timeout) - self.assertEqual(2, publisher.max_retries) - - parsed_url = urlparse.urlparse('http://localhost:90/path1?' 
- 'max_retries=6') - publisher = http.HttpPublisher(parsed_url) - self.assertEqual(1, publisher.timeout) - self.assertEqual(6, publisher.max_retries) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_samples(self, thelog): - """Test publisher post.""" - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.sample_data) - - self.assertEqual(1, m_req.call_count) - self.assertFalse(thelog.error.called) - - res.status_code = 401 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.sample_data) - - self.assertEqual(1, m_req.call_count) - self.assertTrue(thelog.error.called) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_events(self, thelog): - """Test publisher post.""" - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_events(self.event_data) - - self.assertEqual(1, m_req.call_count) - self.assertFalse(thelog.error.called) - - res.status_code = 401 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_samples(self.event_data) - - self.assertEqual(1, m_req.call_count) - self.assertTrue(thelog.error.called) - - @mock.patch('ceilometer.publisher.http.LOG') - def test_http_post_empty_data(self, thelog): - parsed_url = urlparse.urlparse('http://localhost:90/path1') - publisher = http.HttpPublisher(parsed_url) - - res = mock.Mock() - res.status_code = 200 - with mock.patch.object(Session, 'post', return_value=res) as m_req: - publisher.publish_events(self.empty_event_data) - - self.assertEqual(0, m_req.call_count) - self.assertTrue(thelog.debug.called) diff --git a/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py b/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py deleted file mode 100644 index aec30d84..00000000 --- a/ceilometer/tests/unit/publisher/test_kafka_broker_publisher.py +++ /dev/null @@ -1,210 +0,0 @@ -# -# Copyright 2015 Cisco Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
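
The config test above encodes the HttpPublisher URL contract: an empty hostname or a non-numeric port raises ValueError, and timeout/max_retries default to 1 and 2 unless overridden in the query string. A sketch of that parsing, assuming only the stdlib/six URL helpers (the function name is illustrative):

    from six.moves.urllib import parse as urlparse


    def parse_publisher_url(url):
        parsed = urlparse.urlparse(url)
        if not parsed.hostname:
            raise ValueError('missing hostname in publisher URL: %s' % url)
        _ = parsed.port  # .port raises ValueError for a non-numeric port
        options = urlparse.parse_qs(parsed.query)
        # parse_qs returns lists; take the last occurrence of each option
        timeout = int(options.get('timeout', ['1'])[-1])
        max_retries = int(options.get('max_retries', ['2'])[-1])
        return parsed, timeout, max_retries
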
-"""Tests for ceilometer/publisher/kafka_broker.py -""" -import datetime -import uuid - -import mock -from oslo_utils import netutils - -from ceilometer.event.storage import models as event -from ceilometer.publisher import kafka_broker as kafka -from ceilometer.publisher import messaging as msg_publisher -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -@mock.patch('ceilometer.publisher.kafka_broker.LOG', mock.Mock()) -@mock.patch('ceilometer.publisher.kafka_broker.kafka.KafkaClient', - mock.Mock()) -class TestKafkaPublisher(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def test_publish(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_samples(self.test_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_without_options(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_samples(self.test_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_without_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer')) - self.assertEqual('default', publisher.policy) - - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=test')) - self.assertEqual('default', publisher.policy) - - def test_publish_to_host_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=default')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = TypeError - self.assertRaises(msg_publisher.DeliveryFailure, - 
publisher.publish_samples, - self.test_data) - self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_drop_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=drop')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - publisher.publish_samples(self.test_data) - self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_to_host_with_queue_policy(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - publisher.publish_samples(self.test_data) - self.assertEqual(1, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(1, len(publisher.local_queue)) - - def test_publish_to_down_host_with_default_queue_size(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - - for i in range(0, 2000): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual('test-976', - publisher.local_queue[0][1][0]['counter_name']) - self.assertEqual('test-1999', - publisher.local_queue[1023][1][0]['counter_name']) - - def test_publish_to_host_from_down_to_up_with_queue(self): - publisher = kafka.KafkaBrokerPublisher(netutils.urlsplit( - 'kafka://127.0.0.1:9092?topic=ceilometer&policy=queue')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - for i in range(0, 16): - for s in self.test_data: - s.name = 'test-%d' % i - publisher.publish_samples(self.test_data) - - self.assertEqual(16, len(publisher.local_queue)) - - fake_producer.send_messages.side_effect = None - for s in self.test_data: - s.name = 'test-%d' % 16 - publisher.publish_samples(self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - - def test_publish_event_with_default_policy(self): - publisher = kafka.KafkaBrokerPublisher( - netutils.urlsplit('kafka://127.0.0.1:9092?topic=ceilometer')) - - with mock.patch.object(publisher, '_producer') as fake_producer: - publisher.publish_events(self.test_event_data) - self.assertEqual(5, len(fake_producer.send_messages.mock_calls)) - - with mock.patch.object(publisher, '_producer') as fake_producer: - fake_producer.send_messages.side_effect = Exception("test") - self.assertRaises(msg_publisher.DeliveryFailure, - publisher.publish_events, - self.test_event_data) - self.assertEqual(100, len(fake_producer.send_messages.mock_calls)) - self.assertEqual(0, len(publisher.local_queue)) diff --git a/ceilometer/tests/unit/publisher/test_messaging_publisher.py b/ceilometer/tests/unit/publisher/test_messaging_publisher.py deleted file mode 100644 index 203424e2..00000000 --- a/ceilometer/tests/unit/publisher/test_messaging_publisher.py +++ /dev/null @@ -1,290 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may 
-# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/messaging.py -""" -import datetime -import uuid - -import mock -from oslo_config import fixture as fixture_config -from oslo_utils import netutils -import testscenarios.testcase - -from ceilometer.event.storage import models as event -from ceilometer.publisher import messaging as msg_publisher -from ceilometer import sample -from ceilometer.tests import base as tests_base - - -class BasePublisherTestCase(tests_base.BaseTestCase): - test_event_data = [ - event.Event(message_id=uuid.uuid4(), - event_type='event_%d' % i, - generated=datetime.datetime.utcnow(), - traits=[], raw={}) - for i in range(0, 5) - ] - - test_sample_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - ), - ] - - def setUp(self): - super(BasePublisherTestCase, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - -class NotifierOnlyPublisherTest(BasePublisherTestCase): - - @mock.patch('oslo_messaging.Notifier') - def test_publish_topic_override(self, notifier): - msg_publisher.SampleNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_topic')) - notifier.assert_called_with(mock.ANY, topics=['custom_topic'], - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - msg_publisher.EventNotifierPublisher( - netutils.urlsplit('notifier://?topic=custom_event_topic')) - notifier.assert_called_with(mock.ANY, topics=['custom_event_topic'], - driver=mock.ANY, retry=mock.ANY, - publisher_id=mock.ANY) - - -class TestPublisher(testscenarios.testcase.WithScenarios, - BasePublisherTestCase): - scenarios = [ - ('notifier', - dict(protocol="notifier", - publisher_cls=msg_publisher.SampleNotifierPublisher, - test_data=BasePublisherTestCase.test_sample_data, - pub_func='publish_samples', attr='source')), - ('event_notifier', - 
dict(protocol="notifier", - publisher_cls=msg_publisher.EventNotifierPublisher, - test_data=BasePublisherTestCase.test_event_data, - pub_func='publish_events', attr='event_type')), - ] - - def setUp(self): - super(TestPublisher, self).setUp() - self.topic = (self.CONF.publisher_notifier.event_topic - if self.pub_func == 'publish_events' else - self.CONF.publisher_notifier.metering_topic) - - -class TestPublisherPolicy(TestPublisher): - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_no_policy(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_block(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=default' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.info.called) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - @mock.patch('ceilometer.publisher.messaging.LOG') - def test_published_with_policy_incorrect(self, mylog): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=notexist' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - self.assertRaises( - msg_publisher.DeliveryFailure, - getattr(publisher, self.pub_func), - self.test_data) - self.assertTrue(mylog.warning.called) - self.assertEqual('default', publisher.policy) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - -@mock.patch('ceilometer.publisher.messaging.LOG', mock.Mock()) -class TestPublisherPolicyReactions(TestPublisher): - - def test_published_with_policy_drop_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=drop' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(self.test_data) - self.assertEqual(0, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - - getattr(publisher, self.pub_func)(self.test_data) - self.assertEqual(1, len(publisher.local_queue)) - fake_send.assert_called_once_with( - self.topic, mock.ANY) - - def test_published_with_policy_queue_and_rpc_down_up(self): - self.rpc_unreachable = True - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % 
self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(1, len(publisher.local_queue)) - - fake_send.side_effect = mock.MagicMock() - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(0, len(publisher.local_queue)) - - topic = self.topic - expected = [mock.call(topic, mock.ANY), - mock.call(topic, mock.ANY), - mock.call(topic, mock.ANY)] - self.assertEqual(expected, fake_send.mock_calls) - - def test_published_with_policy_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls(netutils.urlsplit( - '%s://?policy=queue&max_queue_length=3' % self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 5): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(3, len(publisher.local_queue)) - self.assertEqual( - 'test-2', - publisher.local_queue[0][1][0][self.attr] - ) - self.assertEqual( - 'test-3', - publisher.local_queue[1][1][0][self.attr] - ) - self.assertEqual( - 'test-4', - publisher.local_queue[2][1][0][self.attr] - ) - - def test_published_with_policy_default_sized_queue_and_rpc_down(self): - publisher = self.publisher_cls( - netutils.urlsplit('%s://?policy=queue' % self.protocol)) - - side_effect = msg_publisher.DeliveryFailure() - with mock.patch.object(publisher, '_send') as fake_send: - fake_send.side_effect = side_effect - for i in range(0, 2000): - for s in self.test_data: - setattr(s, self.attr, 'test-%d' % i) - getattr(publisher, self.pub_func)(self.test_data) - - self.assertEqual(1024, len(publisher.local_queue)) - self.assertEqual( - 'test-976', - publisher.local_queue[0][1][0][self.attr] - ) - self.assertEqual( - 'test-1999', - publisher.local_queue[1023][1][0][self.attr] - ) diff --git a/ceilometer/tests/unit/publisher/test_udp.py b/ceilometer/tests/unit/publisher/test_udp.py deleted file mode 100644 index ada303eb..00000000 --- a/ceilometer/tests/unit/publisher/test_udp.py +++ /dev/null @@ -1,174 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
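
Every policy test above reduces to the same queue discipline: 'default' re-raises after retries, 'drop' discards, and 'queue' keeps at most max_queue_length pending batches, discarding the oldest first, which is why the queue head is 'test-976' after 2000 failed batches against the default 1024-deep queue. A sketch of that trimming logic (illustrative names, not the msg_publisher code):

    class QueuingPublisher(object):
        """Sketch of the 'queue' failure policy exercised above."""

        def __init__(self, send, max_queue_length=1024):
            self._send = send  # callable(topic, data), may raise on failure
            self.max_queue_length = max_queue_length
            self.local_queue = []

        def publish(self, topic, data):
            self.local_queue.append((topic, data))
            queue, self.local_queue = self.local_queue, []
            for index, (t, d) in enumerate(queue):
                try:
                    self._send(t, d)
                except Exception:
                    # requeue everything not yet delivered; on overflow the
                    # newest entries win and the oldest are dropped
                    self.local_queue = queue[index:][-self.max_queue_length:]
                    break
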
-"""Tests for ceilometer/publisher/udp.py -""" - -import datetime -import socket - -import mock -import msgpack -from oslo_config import fixture as fixture_config -from oslo_utils import netutils -from oslotest import base - -from ceilometer.publisher import udp -from ceilometer.publisher import utils -from ceilometer import sample - - -COUNTER_SOURCE = 'testsource' - - -class TestUDPPublisher(base.BaseTestCase): - test_data = [ - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test2', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - sample.Sample( - name='test3', - type=sample.TYPE_CUMULATIVE, - unit='', - volume=1, - user_id='test', - project_id='test', - resource_id='test_run_tasks', - timestamp=datetime.datetime.utcnow().isoformat(), - resource_metadata={'name': 'TestPublish'}, - source=COUNTER_SOURCE, - ), - ] - - @staticmethod - def _make_fake_socket(published): - def _fake_socket_socket(family, type): - def record_data(msg, dest): - published.append((msg, dest)) - - udp_socket = mock.Mock() - udp_socket.sendto = record_data - return udp_socket - - return _fake_socket_socket - - def setUp(self): - super(TestUDPPublisher, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF.publisher.telemetry_secret = 'not-so-secret' - - def _check_udp_socket(self, url, expected_addr_family): - with mock.patch.object(socket, 'socket') as mock_socket: - udp.UDPPublisher(netutils.urlsplit(url)) - mock_socket.assert_called_with(expected_addr_family, - socket.SOCK_DGRAM) - - def test_publisher_udp_socket_ipv4(self): - self._check_udp_socket('udp://127.0.0.1:4952', - socket.AF_INET) - - def test_publisher_udp_socket_ipv6(self): - self._check_udp_socket('udp://[::1]:4952', - socket.AF_INET6) - - def test_published(self): - self.data_sent = [] - with mock.patch('socket.socket', - self._make_fake_socket(self.data_sent)): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://somehost')) - publisher.publish_samples(self.test_data) - - self.assertEqual(5, len(self.data_sent)) - - sent_counters = [] - - for data, dest in self.data_sent: - counter = msgpack.loads(data, encoding="utf-8") - sent_counters.append(counter) - - # Check destination - self.assertEqual(('somehost', - self.CONF.collector.udp_port), dest) - - # Check that counters are equal - def sort_func(counter): - return counter['counter_name'] - - counters = [utils.meter_message_from_counter(d, "not-so-secret") - for d in self.test_data] - counters.sort(key=sort_func) - 
sent_counters.sort(key=sort_func) - self.assertEqual(counters, sent_counters) - - @staticmethod - def _raise_ioerror(*args): - raise IOError - - def _make_broken_socket(self, family, type): - udp_socket = mock.Mock() - udp_socket.sendto = self._raise_ioerror - return udp_socket - - def test_publish_error(self): - with mock.patch('socket.socket', - self._make_broken_socket): - publisher = udp.UDPPublisher( - netutils.urlsplit('udp://localhost')) - publisher.publish_samples(self.test_data) diff --git a/ceilometer/tests/unit/publisher/test_utils.py b/ceilometer/tests/unit/publisher/test_utils.py deleted file mode 100644 index 5b5f6736..00000000 --- a/ceilometer/tests/unit/publisher/test_utils.py +++ /dev/null @@ -1,135 +0,0 @@ -# -# Copyright 2012 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Tests for ceilometer/publisher/utils.py -""" -from oslo_serialization import jsonutils -from oslotest import base - -from ceilometer.publisher import utils - - -class TestSignature(base.BaseTestCase): - def test_compute_signature_change_key(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'A': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_change_value(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'a', 'b': 'B'}, - 'not-so-secret') - self.assertNotEqual(sig1, sig2) - - def test_compute_signature_same(self): - sig1 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - sig2 = utils.compute_signature({'a': 'A', 'b': 'B'}, - 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - sig2 = utils.compute_signature(data, 'not-so-secret') - self.assertEqual(sig1, sig2) - - def test_compute_signature_use_configured_secret(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - sig2 = utils.compute_signature(data, 'different-value') - self.assertNotEqual(sig1, sig2) - - def test_verify_signature_signed(self): - data = {'a': 'A', 'b': 'B'} - sig1 = utils.compute_signature(data, 'not-so-secret') - data['message_signature'] = sig1 - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unsigned(self): - data = {'a': 'A', 'b': 'B'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_incorrect(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': 'Not the same'} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_invalid_encoding(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': ''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_unicode(self): - data = {'a': 'A', 'b': 'B', - 'message_signature': 
u''} - self.assertFalse(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - self.assertTrue(utils.verify_signature(data, 'not-so-secret')) - - def test_verify_signature_nested_json(self): - data = {'a': 'A', - 'b': 'B', - 'nested': {'a': 'A', - 'b': 'B', - 'c': ('c',), - 'd': ['d'] - }, - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_verify_unicode_symbols(self): - data = {u'a\xe9\u0437': 'A', - 'b': u'B\xe9\u0437' - } - data['message_signature'] = utils.compute_signature( - data, - 'not-so-secret') - jsondata = jsonutils.loads(jsonutils.dumps(data)) - self.assertTrue(utils.verify_signature(jsondata, 'not-so-secret')) - - def test_besteffort_compare_digest(self): - hash1 = "f5ac3fe42b80b80f979825d177191bc5" - hash2 = "f5ac3fe42b80b80f979825d177191bc5" - hash3 = "1dece7821bf3fd70fe1309eaa37d52a2" - hash4 = b"f5ac3fe42b80b80f979825d177191bc5" - hash5 = b"f5ac3fe42b80b80f979825d177191bc5" - hash6 = b"1dece7821bf3fd70fe1309eaa37d52a2" - - self.assertTrue(utils.besteffort_compare_digest(hash1, hash2)) - self.assertFalse(utils.besteffort_compare_digest(hash1, hash3)) - self.assertTrue(utils.besteffort_compare_digest(hash4, hash5)) - self.assertFalse(utils.besteffort_compare_digest(hash4, hash6)) - - def test_verify_no_secret(self): - data = {'a': 'A', 'b': 'B'} - self.assertTrue(utils.verify_signature(data, '')) diff --git a/ceilometer/tests/unit/storage/test_base.py b/ceilometer/tests/unit/storage/test_base.py deleted file mode 100644 index f6b3e989..00000000 --- a/ceilometer/tests/unit/storage/test_base.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
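
The signature tests above pin the shape of the signing contract: the message_signature field is excluded from its own digest, key order is irrelevant, nested structures are covered, and the comparison should be constant-time where the platform allows. A sketch of the core idea, an HMAC over sorted key/value pairs; this is not byte-compatible with the deleted utils module, which also canonicalizes sequence types so a JSON round-trip keeps signatures stable:

    import hashlib
    import hmac

    import six


    def compute_signature(message, secret):
        digest = hmac.new(secret.encode('utf-8'), digestmod=hashlib.sha256)
        for name, value in sorted(message.items()):
            if name == 'message_signature':
                continue  # the signature must never sign itself
            if isinstance(value, dict):
                value = compute_signature(value, secret)
            digest.update(six.text_type(name).encode('utf-8'))
            digest.update(six.text_type(value).encode('utf-8'))
        return digest.hexdigest()


    def verify_signature(message, secret):
        if not secret:
            return True  # signing disabled: accept everything
        expected = compute_signature(message, secret)
        provided = six.text_type(message.get('message_signature', ''))
        try:
            return hmac.compare_digest(provided, expected)
        except TypeError:
            return False  # e.g. a non-ASCII unicode signature on Python 3
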
-import datetime -import math - -from oslotest import base as testbase - -from ceilometer.storage import base - - -class BaseTest(testbase.BaseTestCase): - - def test_iter_period(self): - times = list(base.iter_period( - datetime.datetime(2013, 1, 1, 12, 0), - datetime.datetime(2013, 1, 1, 13, 0), - 60)) - self.assertEqual(60, len(times)) - self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10), - datetime.datetime(2013, 1, 1, 12, 11)), times[10]) - self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21), - datetime.datetime(2013, 1, 1, 12, 22)), times[21]) - - def test_iter_period_bis(self): - times = list(base.iter_period( - datetime.datetime(2013, 1, 2, 13, 0), - datetime.datetime(2013, 1, 2, 14, 0), - 55)) - self.assertEqual(math.ceil(3600 / 55.0), len(times)) - self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10), - datetime.datetime(2013, 1, 2, 13, 10, 5)), - times[10]) - self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15), - datetime.datetime(2013, 1, 2, 13, 20, 10)), - times[21]) - - def test_handle_sort_key(self): - sort_keys_meter = base._handle_sort_key('meter', 'foo') - self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter) - - sort_keys_resource = base._handle_sort_key('resource', 'project_id') - self.assertEqual(['project_id', 'user_id', 'timestamp'], - sort_keys_resource) diff --git a/ceilometer/tests/unit/storage/test_get_connection.py b/ceilometer/tests/unit/storage/test_get_connection.py index 4eb094e9..4adee6dd 100644 --- a/ceilometer/tests/unit/storage/test_get_connection.py +++ b/ceilometer/tests/unit/storage/test_get_connection.py @@ -14,34 +14,27 @@ # under the License. """Tests for ceilometer/storage/ """ -import unittest import mock from oslo_config import fixture as fixture_config from oslotest import base import retrying -try: - from ceilometer.event.storage import impl_hbase as impl_hbase_event -except ImportError: - impl_hbase_event = None +from ceilometer.event.storage import impl_log +from ceilometer.event.storage import impl_sqlalchemy from ceilometer import storage -from ceilometer.storage import impl_log -from ceilometer.storage import impl_sqlalchemy import six class EngineTest(base.BaseTestCase): def test_get_connection(self): - engine = storage.get_connection('log://localhost', - 'ceilometer.metering.storage') + engine = storage.get_connection('log://localhost') self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): try: - storage.get_connection('no-such-engine://localhost', - 'ceilometer.metering.storage') + storage.get_connection('no-such-engine://localhost') except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) @@ -74,44 +67,15 @@ class ConnectionConfigTest(base.BaseTestCase): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) def test_two_urls(self): - self.CONF.set_override("connection", "log://", group="database") + self.CONF.set_override("connection", "sqlite://", group="database") + self.CONF.set_override("event_connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - - @unittest.skipUnless(impl_hbase_event, 'need hbase 
implementation') - def test_three_urls(self): - self.CONF.set_override("connection", "log://", group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) - - @unittest.skipUnless(impl_hbase_event, 'need hbase implementation') - def test_three_urls_no_default(self): - self.CONF.set_override("connection", None, group="database") - self.CONF.set_override("metering_connection", "log://", - group="database") - self.CONF.set_override("event_connection", "hbase://__test__", - group="database") - conn = storage.get_connection_from_config(self.CONF) - self.assertIsInstance(conn, impl_log.Connection) - conn = storage.get_connection_from_config(self.CONF, 'event') - self.assertIsInstance(conn, impl_hbase_event.Connection) def test_sqlalchemy_driver(self): self.CONF.set_override("connection", "sqlite+pysqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_sqlalchemy.Connection) - conn = storage.get_connection_from_config(self.CONF, 'metering') - self.assertIsInstance(conn, impl_sqlalchemy.Connection) diff --git a/ceilometer/tests/unit/storage/test_models.py b/ceilometer/tests/unit/storage/test_models.py deleted file mode 100644 index 9790d241..00000000 --- a/ceilometer/tests/unit/storage/test_models.py +++ /dev/null @@ -1,94 +0,0 @@ -# -# Copyright 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
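
The surviving test_get_connection.py hunk above documents the storage API after this patch: storage.get_connection() takes only a URL (the old 'ceilometer.metering.storage' namespace argument is gone), and get_connection_from_config() resolves database.event_connection before falling back to database.connection. A sketch of that resolution order, with a plain dict standing in for the real entry-point driver lookup:

    def get_connection(url, drivers):
        """Dispatch on the URL scheme, e.g. 'log://' or 'sqlite://'."""
        scheme = url.partition(':')[0]
        try:
            return drivers[scheme]()
        except KeyError:
            raise RuntimeError('no storage driver found for %s' % url)


    def get_event_connection_url(conf):
        # event_connection, when set, wins over the generic connection URL
        return conf.database.event_connection or conf.database.connection
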
- -import datetime - -from oslotest import base as testbase -import six - -from ceilometer.event.storage import models as event_models -from ceilometer.storage import base -from ceilometer.storage import models - - -class FakeModel(base.Model): - def __init__(self, arg1, arg2): - base.Model.__init__(self, arg1=arg1, arg2=arg2) - - -class ModelTest(testbase.BaseTestCase): - - def test_create_attributes(self): - m = FakeModel(1, 2) - self.assertEqual(1, m.arg1) - self.assertEqual(2, m.arg2) - - def test_as_dict(self): - m = FakeModel(1, 2) - d = m.as_dict() - self.assertEqual({'arg1': 1, 'arg2': 2}, d) - - def test_as_dict_recursive(self): - m = FakeModel(1, FakeModel('a', 'b')) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': {'arg1': 'a', - 'arg2': 'b'}}, - d) - - def test_as_dict_recursive_list(self): - m = FakeModel(1, [FakeModel('a', 'b')]) - d = m.as_dict() - self.assertEqual({'arg1': 1, - 'arg2': [{'arg1': 'a', - 'arg2': 'b'}]}, - d) - - def test_event_repr_no_traits(self): - x = event_models.Event("1", "name", "now", None, {}) - self.assertEqual("", repr(x)) - - def test_get_field_names_of_sample(self): - sample_fields = ["source", "counter_name", "counter_type", - "counter_unit", "counter_volume", "user_id", - "project_id", "resource_id", "timestamp", - "resource_metadata", "message_id", - "message_signature", "recorded_at"] - - self.assertEqual(set(sample_fields), - set(models.Sample.get_field_names())) - - -class TestTraitModel(testbase.BaseTestCase): - - def test_convert_value(self): - v = event_models.Trait.convert_value( - event_models.Trait.INT_TYPE, '10') - self.assertEqual(10, v) - self.assertIsInstance(v, int) - v = event_models.Trait.convert_value( - event_models.Trait.FLOAT_TYPE, '10') - self.assertEqual(10.0, v) - self.assertIsInstance(v, float) - - v = event_models.Trait.convert_value( - event_models.Trait.DATETIME_TYPE, '2013-08-08 21:05:37.123456') - self.assertEqual(datetime.datetime(2013, 8, 8, 21, 5, 37, 123456), v) - self.assertIsInstance(v, datetime.datetime) - - v = event_models.Trait.convert_value( - event_models.Trait.TEXT_TYPE, 10) - self.assertEqual("10", v) - self.assertIsInstance(v, six.text_type) diff --git a/ceilometer/tests/unit/telemetry/__init__.py b/ceilometer/tests/unit/telemetry/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/telemetry/test_notifications.py b/ceilometer/tests/unit/telemetry/test_notifications.py deleted file mode 100644 index 292c7cd0..00000000 --- a/ceilometer/tests/unit/telemetry/test_notifications.py +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
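
test_models.py above is effectively the specification for the storage base Model class: keyword arguments become attributes, and as_dict() recurses through nested models and lists of models. A short sketch of that behaviour (same shape as the tests, not the deleted module):

    class Model(object):
        """Sketch of the recursive as_dict() behaviour pinned down above."""

        def __init__(self, **kwargs):
            self.fields = list(kwargs)
            for name, value in kwargs.items():
                setattr(self, name, value)

        def as_dict(self):
            d = {}
            for name in self.fields:
                value = getattr(self, name)
                if isinstance(value, Model):
                    value = value.as_dict()
                elif isinstance(value, list):
                    value = [v.as_dict() if isinstance(v, Model) else v
                             for v in value]
                d[name] = value
            return d
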
- -from oslotest import base - -from ceilometer.telemetry import notifications - -NOTIFICATION = { - u'_context_domain': None, - u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d', - 'event_type': u'telemetry.api', - 'timestamp': u'2015-06-1909: 19: 35.786893', - u'_context_auth_token': None, - u'_context_read_only': False, - 'payload': {'samples': - [{'counter_name': u'instance100', - u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'instance', - u'timestamp': u'2015-06-19T09: 19: 35.785330', - u'message_signature': u'fake_signature1', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'counter_unit': u'instance', - u'counter_volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905', - u'counter_type': u'gauge'}, - {u'counter_name': u'instance100', - u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'resource_id': u'instance', - u'timestamp': u'2015-06-19T09: 19: 35.785330', - u'message_signature': u'fake_signature12', - u'resource_metadata': {u'foo': u'bar'}, - u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack', - u'counter_unit': u'instance', - u'counter_volume': 1.0, - u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2', - u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905', - u'counter_type': u'gauge'}]}, - u'_context_resource_uuid': None, - u'_context_user_identity': u'fake_user_identity---', - u'_context_show_deleted': False, - u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2', - 'priority': 'info', - u'_context_is_admin': True, - u'_context_project_domain': None, - u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2', - u'_context_user_domain': None, - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' -} - - -class TelemetryIpcTestCase(base.BaseTestCase): - - def test_process_notification(self): - sample_creation = notifications.TelemetryIpc(None) - samples = list(sample_creation.process_notification(NOTIFICATION)) - self.assertEqual(2, len(samples)) - payload = NOTIFICATION["payload"]['samples'] - for index, sample in enumerate(samples): - self.assertEqual(payload[index]["user_id"], sample.user_id) - self.assertEqual(payload[index]["counter_name"], sample.name) - self.assertEqual(payload[index]["resource_id"], sample.resource_id) - self.assertEqual(payload[index]["timestamp"], sample.timestamp) - self.assertEqual(payload[index]["resource_metadata"], - sample.resource_metadata) - self.assertEqual(payload[index]["counter_volume"], sample.volume) - self.assertEqual(payload[index]["source"], sample.source) - self.assertEqual(payload[index]["counter_type"], sample.type) - self.assertEqual(payload[index]["message_id"], sample.id) - self.assertEqual(payload[index]["counter_unit"], sample.unit) diff --git a/ceilometer/tests/unit/test_coordination.py b/ceilometer/tests/unit/test_coordination.py deleted file mode 100644 index 966946b1..00000000 --- a/ceilometer/tests/unit/test_coordination.py +++ /dev/null @@ -1,283 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging - -import mock -from oslo_config import fixture as fixture_config -import tooz.coordination - -from ceilometer import coordination -from ceilometer.tests import base -from ceilometer import utils - - -class MockToozCoordinator(object): - def __init__(self, member_id, shared_storage): - self._member_id = member_id - self._groups = shared_storage - self.is_started = False - - def start(self): - self.is_started = True - - def stop(self): - pass - - def heartbeat(self): - pass - - def create_group(self, group_id): - if group_id in self._groups: - return MockAsyncError( - tooz.coordination.GroupAlreadyExist(group_id)) - self._groups[group_id] = {} - return MockAsyncResult(None) - - def join_group(self, group_id, capabilities=b''): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - if self._member_id in self._groups[group_id]: - return MockAsyncError( - tooz.coordination.MemberAlreadyExist(group_id, - self._member_id)) - self._groups[group_id][self._member_id] = { - "capabilities": capabilities, - } - return MockAsyncResult(None) - - def leave_group(self, group_id): - return MockAsyncResult(None) - - def get_members(self, group_id): - if group_id not in self._groups: - return MockAsyncError( - tooz.coordination.GroupNotCreated(group_id)) - return MockAsyncResult(self._groups[group_id]) - - -class MockToozCoordExceptionRaiser(MockToozCoordinator): - def start(self): - raise tooz.coordination.ToozError('error') - - def heartbeat(self): - raise tooz.coordination.ToozError('error') - - def join_group(self, group_id, capabilities=b''): - raise tooz.coordination.ToozError('error') - - def get_members(self, group_id): - raise tooz.coordination.ToozError('error') - - -class MockToozCoordExceptionOnJoinRaiser(MockToozCoordinator): - def __init__(self, member_id, shared_storage, retry_count=None): - super(MockToozCoordExceptionOnJoinRaiser, - self).__init__(member_id, shared_storage) - self.tooz_error_count = retry_count - self.count = 0 - - def join_group(self, group_id, capabilities=b''): - if self.count == self.tooz_error_count: - return MockAsyncResult(None) - else: - self.count += 1 - raise tooz.coordination.ToozError('error') - - -class MockAsyncResult(tooz.coordination.CoordAsyncResult): - def __init__(self, result): - self.result = result - - def get(self, timeout=0): - return self.result - - @staticmethod - def done(): - return True - - -class MockAsyncError(tooz.coordination.CoordAsyncResult): - def __init__(self, error): - self.error = error - - def get(self, timeout=0): - raise self.error - - @staticmethod - def done(): - return True - - -class MockLoggingHandler(logging.Handler): - """Mock logging handler to check for expected logs.""" - - def __init__(self, *args, **kwargs): - self.reset() - logging.Handler.__init__(self, *args, **kwargs) - - def emit(self, record): - self.messages[record.levelname.lower()].append(record.getMessage()) - - def reset(self): - self.messages = {'debug': [], - 'info': [], - 'warning': [], - 'error': [], - 'critical': []} - - -class TestPartitioning(base.BaseTestCase): - - def 
setUp(self): - super(TestPartitioning, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.str_handler = MockLoggingHandler() - coordination.LOG.logger.addHandler(self.str_handler) - self.shared_storage = {} - - def _get_new_started_coordinator(self, shared_storage, agent_id=None, - coordinator_cls=None, retry_count=None, - cleanup_stop=True): - coordinator_cls = coordinator_cls or MockToozCoordinator - self.CONF.set_override('backend_url', 'xxx://yyy', - group='coordination') - with mock.patch('tooz.coordination.get_coordinator', - lambda _, member_id: - coordinator_cls(member_id, shared_storage, - retry_count) if retry_count else - coordinator_cls(member_id, shared_storage)): - pc = coordination.PartitionCoordinator(agent_id) - pc.start() - if cleanup_stop: - self.addCleanup(pc.stop) - return pc - - def _usage_simulation(self, *agents_kwargs): - partition_coordinators = [] - for kwargs in agents_kwargs: - partition_coordinator = self._get_new_started_coordinator( - self.shared_storage, kwargs['agent_id'], kwargs.get( - 'coordinator_cls')) - partition_coordinator.join_group(kwargs['group_id']) - partition_coordinators.append(partition_coordinator) - - for i, kwargs in enumerate(agents_kwargs): - all_resources = kwargs.get('all_resources', []) - expected_resources = kwargs.get('expected_resources', []) - actual_resources = partition_coordinators[i].extract_my_subset( - kwargs['group_id'], all_resources) - self.assertEqual(expected_resources, actual_resources) - - def test_single_group(self): - agents = [dict(agent_id='agent1', group_id='group'), - dict(agent_id='agent2', group_id='group')] - self._usage_simulation(*agents) - - self.assertEqual(['group'], sorted(self.shared_storage.keys())) - self.assertEqual(['agent1', 'agent2'], - sorted(self.shared_storage['group'].keys())) - - def test_multiple_groups(self): - agents = [dict(agent_id='agent1', group_id='group1'), - dict(agent_id='agent2', group_id='group2')] - self._usage_simulation(*agents) - - self.assertEqual(['group1', 'group2'], - sorted(self.shared_storage.keys())) - - def test_partitioning(self): - all_resources = ['resource_%s' % i for i in range(1000)] - agents = ['agent_%s' % i for i in range(10)] - - expected_resources = [list() for _ in range(len(agents))] - hr = utils.HashRing(agents) - for r in all_resources: - key = agents.index(hr.get_node(r)) - expected_resources[key].append(r) - - agents_kwargs = [] - for i, agent in enumerate(agents): - agents_kwargs.append(dict(agent_id=agent, - group_id='group', - all_resources=all_resources, - expected_resources=expected_resources[i])) - self._usage_simulation(*agents_kwargs) - - def test_coordination_backend_offline(self): - agents = [dict(agent_id='agent1', - group_id='group', - all_resources=['res1', 'res2'], - expected_resources=[], - coordinator_cls=MockToozCoordExceptionRaiser)] - self._usage_simulation(*agents) - expected_errors = ['Error getting group membership info from ' - 'coordination backend.', - 'Error connecting to coordination backend.'] - for e in expected_errors: - self.assertIn(e, self.str_handler.messages['error']) - - def test_coordination_backend_connection_fail_on_join(self): - coord = self._get_new_started_coordinator( - {'group'}, 'agent1', MockToozCoordExceptionOnJoinRaiser, - retry_count=2) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordExceptionOnJoinRaiser): - coord.join_group(group_id='group') - - expected_errors = ['Error joining partitioning group group,' - ' re-trying', - 'Error 
joining partitioning group group,' - ' re-trying'] - self.assertEqual(expected_errors, self.str_handler.messages['error']) - - def test_reconnect(self): - coord = self._get_new_started_coordinator({}, 'a', - MockToozCoordExceptionRaiser) - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordExceptionRaiser('a', {})): - coord.heartbeat() - expected_errors = ['Error connecting to coordination backend.', - 'Error sending a heartbeat to coordination ' - 'backend.'] - for e in expected_errors: - self.assertIn(e, self.str_handler.messages['error']) - - self.str_handler.messages['error'] = [] - with mock.patch('tooz.coordination.get_coordinator', - return_value=MockToozCoordinator('a', {})): - coord.heartbeat() - for e in expected_errors: - self.assertNotIn(e, self.str_handler.messages['error']) - - def test_group_id_none(self): - coord = self._get_new_started_coordinator({}, 'a') - self.assertTrue(coord._coordinator.is_started) - - with mock.patch.object(coord._coordinator, 'join_group') as mocked: - coord.join_group(None) - self.assertEqual(0, mocked.call_count) - with mock.patch.object(coord._coordinator, 'leave_group') as mocked: - coord.leave_group(None) - self.assertEqual(0, mocked.call_count) - - def test_stop(self): - coord = self._get_new_started_coordinator({}, 'a', cleanup_stop=False) - self.assertTrue(coord._coordinator.is_started) - coord.join_group("123") - coord.stop() - self.assertIsEmpty(coord._groups) - self.assertIsNone(coord._coordinator) diff --git a/ceilometer/tests/unit/test_declarative.py b/ceilometer/tests/unit/test_declarative.py deleted file mode 100644 index 03b1e396..00000000 --- a/ceilometer/tests/unit/test_declarative.py +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright 2016 Mirantis, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslotest import mockpatch - -from ceilometer import declarative -from ceilometer.tests import base - - -class TestDefinition(base.BaseTestCase): - - def setUp(self): - super(TestDefinition, self).setUp() - self.configs = [ - "_field1", - "_field2|_field3", - {'fields': 'field4.`split(., 1, 1)`'}, - {'fields': ['field5.arg', 'field6'], 'type': 'text'} - ] - self.parser = mock.MagicMock() - parser_patch = mockpatch.Patch( - "jsonpath_rw_ext.parser.ExtentedJsonPathParser.parse", - new=self.parser) - self.useFixture(parser_patch) - - def test_caching_parsers(self): - for config in self.configs * 2: - declarative.Definition("test", config, mock.MagicMock()) - self.assertEqual(4, self.parser.call_count) - self.parser.assert_has_calls([ - mock.call("_field1"), - mock.call("_field2|_field3"), - mock.call("field4.`split(., 1, 1)`"), - mock.call("(field5.arg)|(field6)"), - ]) diff --git a/ceilometer/tests/unit/test_decoupled_pipeline.py b/ceilometer/tests/unit/test_decoupled_pipeline.py deleted file mode 100644 index 283144af..00000000 --- a/ceilometer/tests/unit/test_decoupled_pipeline.py +++ /dev/null @@ -1,296 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import yaml - -from ceilometer import pipeline -from ceilometer import sample -from ceilometer.tests import pipeline_base - - -class TestDecoupledPipeline(pipeline_base.BasePipelineTestCase): - def _setup_pipeline_cfg(self): - source = {'name': 'test_source', - 'interval': 5, - 'counters': ['a'], - 'resources': [], - 'sinks': ['test_sink']} - sink = {'name': 'test_sink', - 'transformers': [{'name': 'update', 'parameters': {}}], - 'publishers': ['test://']} - self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} - - def _augment_pipeline_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['new'], - }) - - def _break_pipeline_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['except'], - }) - - def _dup_pipeline_name_cfg(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['test_sink'] - }) - - def _set_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field] = value - else: - self.pipeline_cfg['sinks'][0][field] = value - - def _extend_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - 
self.pipeline_cfg['sources'][0][field].extend(value) - else: - self.pipeline_cfg['sinks'][0][field].extend(value) - - def _unset_pipeline_cfg(self, field): - if field in self.pipeline_cfg['sources'][0]: - del self.pipeline_cfg['sources'][0][field] - else: - del self.pipeline_cfg['sinks'][0][field] - - def test_source_no_sink(self): - del self.pipeline_cfg['sinks'] - self._exception_create_pipelinemanager() - - def test_source_no_meters_or_counters(self): - del self.pipeline_cfg['sources'][0]['counters'] - self._exception_create_pipelinemanager() - - def test_source_dangling_sink(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['second_sink'] - }) - self._exception_create_pipelinemanager() - - def test_sink_no_source(self): - del self.pipeline_cfg['sources'] - self._exception_create_pipelinemanager() - - def test_source_with_multiple_sinks(self): - counter_cfg = ['a', 'b'] - self._set_pipeline_cfg('counters', counter_cfg) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'transformers': [{ - 'name': 'update', - 'parameters': - { - 'append_name': '_new', - } - }], - 'publishers': ['new'], - }) - self.pipeline_cfg['sources'][0]['sinks'].append('second_sink') - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(pipeline_manager.pipelines)) - self.assertEqual('test_source:test_sink', - str(pipeline_manager.pipelines[0])) - self.assertEqual('test_source:second_sink', - str(pipeline_manager.pipelines[1])) - test_publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[1].publishers[0] - for publisher, sfx in [(test_publisher, '_update'), - (new_publisher, '_new')]: - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, publisher.calls) - self.assertEqual('a' + sfx, getattr(publisher.samples[0], "name")) - self.assertEqual('b' + sfx, getattr(publisher.samples[1], "name")) - - def test_multiple_sources_with_single_sink(self): - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'interval': 5, - 'counters': ['b'], - 'resources': [], - 'sinks': ['test_sink'] - }) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager) - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.test_counter = sample.Sample( - name='b', - type=self.test_counter.type, - volume=self.test_counter.volume, - unit=self.test_counter.unit, - user_id=self.test_counter.user_id, - project_id=self.test_counter.project_id, - resource_id=self.test_counter.resource_id, - timestamp=self.test_counter.timestamp, - resource_metadata=self.test_counter.resource_metadata, - ) - - with pipeline_manager.publisher() as p: - p([self.test_counter]) - - self.assertEqual(2, len(pipeline_manager.pipelines)) - self.assertEqual('test_source:test_sink', - str(pipeline_manager.pipelines[0])) - self.assertEqual('second_source:test_sink', - 
str(pipeline_manager.pipelines[1])) - test_publisher = pipeline_manager.pipelines[0].publishers[0] - another_publisher = pipeline_manager.pipelines[1].publishers[0] - for publisher in [test_publisher, another_publisher]: - self.assertEqual(2, len(publisher.samples)) - self.assertEqual(2, publisher.calls) - self.assertEqual('a_update', getattr(publisher.samples[0], "name")) - self.assertEqual('b_update', getattr(publisher.samples[1], "name")) - - transformed_samples = self.TransformerClass.samples - self.assertEqual(2, len(transformed_samples)) - self.assertEqual(['a', 'b'], - [getattr(s, 'name') for s in transformed_samples]) - - def _do_test_rate_of_change_in_boilerplate_pipeline_cfg(self, index, - meters, units): - with open('etc/ceilometer/pipeline.yaml') as fap: - data = fap.read() - pipeline_cfg = yaml.safe_load(data) - for s in pipeline_cfg['sinks']: - s['publishers'] = ['test://'] - pipeline_manager = pipeline.PipelineManager(pipeline_cfg, - self.transformer_manager) - pipe = pipeline_manager.pipelines[index] - self._do_test_rate_of_change_mapping(pipe, meters, units) - - def test_rate_of_change_boilerplate_disk_read_cfg(self): - meters = ('disk.read.bytes', 'disk.read.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_disk_write_cfg(self): - meters = ('disk.write.bytes', 'disk.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_network_incoming_cfg(self): - meters = ('network.incoming.bytes', 'network.incoming.packets') - units = ('B', 'packet') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, - meters, - units) - - def test_rate_of_change_boilerplate_per_disk_device_read_cfg(self): - meters = ('disk.device.read.bytes', 'disk.device.read.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_per_disk_device_write_cfg(self): - meters = ('disk.device.write.bytes', 'disk.device.write.requests') - units = ('B', 'request') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(3, - meters, - units) - - def test_rate_of_change_boilerplate_network_outgoing_cfg(self): - meters = ('network.outgoing.bytes', 'network.outgoing.packets') - units = ('B', 'packet') - self._do_test_rate_of_change_in_boilerplate_pipeline_cfg(4, - meters, - units) - - def test_duplicated_sinks_names(self): - self.pipeline_cfg['sinks'].append({ - 'name': 'test_sink', - 'publishers': ['except'], - }) - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) - - def test_duplicated_source_names(self): - self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'interval': 5, - 'counters': ['a'], - 'resources': [], - 'sinks': ['test_sink'] - }) - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager) diff --git a/ceilometer/tests/unit/test_event_pipeline.py b/ceilometer/tests/unit/test_event_pipeline.py deleted file mode 100644 index 5c88b4f3..00000000 --- a/ceilometer/tests/unit/test_event_pipeline.py +++ /dev/null @@ -1,410 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import traceback -import uuid - -import mock -from oslo_config import fixture as fixture_config -import oslo_messaging -from oslotest import base -from oslotest import mockpatch - -from ceilometer.event.storage import models -from ceilometer import pipeline -from ceilometer import publisher -from ceilometer.publisher import test as test_publisher -from ceilometer.publisher import utils - - -class EventPipelineTestCase(base.BaseTestCase): - - def get_publisher(self, url, namespace=''): - fake_drivers = {'test://': test_publisher.TestPublisher, - 'new://': test_publisher.TestPublisher, - 'except://': self.PublisherClassException} - return fake_drivers[url](url) - - class PublisherClassException(publisher.PublisherBase): - def publish_samples(self, samples): - pass - - def publish_events(self, events): - raise Exception() - - def setUp(self): - super(EventPipelineTestCase, self).setUp() - self.p_type = pipeline.EVENT_TYPE - self.transformer_manager = None - - self.test_event = models.Event( - message_id=uuid.uuid4(), - event_type='a', - generated=datetime.datetime.utcnow(), - traits=[ - models.Trait('t_text', 1, 'text_trait'), - models.Trait('t_int', 2, 'int_trait'), - models.Trait('t_float', 3, 'float_trait'), - models.Trait('t_datetime', 4, 'datetime_trait') - ], - raw={'status': 'started'} - ) - - self.test_event2 = models.Event( - message_id=uuid.uuid4(), - event_type='b', - generated=datetime.datetime.utcnow(), - traits=[ - models.Trait('t_text', 1, 'text_trait'), - models.Trait('t_int', 2, 'int_trait'), - models.Trait('t_float', 3, 'float_trait'), - models.Trait('t_datetime', 4, 'datetime_trait') - ], - raw={'status': 'stopped'} - ) - - self.useFixture(mockpatch.PatchObject( - publisher, 'get_publisher', side_effect=self.get_publisher)) - - self._setup_pipeline_cfg() - - self._reraise_exception = True - self.useFixture(mockpatch.Patch( - 'ceilometer.pipeline.LOG.exception', - side_effect=self._handle_reraise_exception)) - - def _handle_reraise_exception(self, msg): - if self._reraise_exception: - raise Exception(traceback.format_exc()) - - def _setup_pipeline_cfg(self): - """Setup the appropriate form of pipeline config.""" - source = {'name': 'test_source', - 'events': ['a'], - 'sinks': ['test_sink']} - sink = {'name': 'test_sink', - 'publishers': ['test://']} - self.pipeline_cfg = {'sources': [source], 'sinks': [sink]} - - def _augment_pipeline_cfg(self): - """Augment the pipeline config with an additional element.""" - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'events': ['b'], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'publishers': ['new://'], - }) - - def _break_pipeline_cfg(self): - """Break the pipeline config with a malformed element.""" - self.pipeline_cfg['sources'].append({ - 'name': 'second_source', - 'events': ['b'], - 'sinks': ['second_sink'] - }) - self.pipeline_cfg['sinks'].append({ - 'name': 'second_sink', - 'publishers': ['except'], - }) - - def _dup_pipeline_name_cfg(self): - """Break the pipeline config with duplicate pipeline name.""" - 
self.pipeline_cfg['sources'].append({ - 'name': 'test_source', - 'events': ['a'], - 'sinks': ['test_sink'] - }) - - def _set_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field] = value - else: - self.pipeline_cfg['sinks'][0][field] = value - - def _extend_pipeline_cfg(self, field, value): - if field in self.pipeline_cfg['sources'][0]: - self.pipeline_cfg['sources'][0][field].extend(value) - else: - self.pipeline_cfg['sinks'][0][field].extend(value) - - def _unset_pipeline_cfg(self, field): - if field in self.pipeline_cfg['sources'][0]: - del self.pipeline_cfg['sources'][0][field] - else: - del self.pipeline_cfg['sinks'][0][field] - - def _exception_create_pipelinemanager(self): - self.assertRaises(pipeline.PipelineException, - pipeline.PipelineManager, - self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - def test_no_events(self): - self._unset_pipeline_cfg('events') - self._exception_create_pipelinemanager() - - def test_no_name(self): - self._unset_pipeline_cfg('name') - self._exception_create_pipelinemanager() - - def test_name(self): - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - for pipe in pipeline_manager.pipelines: - self.assertTrue(pipe.name.startswith('event:')) - - def test_no_publishers(self): - self._unset_pipeline_cfg('publishers') - self._exception_create_pipelinemanager() - - def test_check_events_include_exclude_same(self): - event_cfg = ['a', '!a'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_events_include_exclude(self): - event_cfg = ['a', '!b'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_events_wildcard_included(self): - event_cfg = ['a', '*'] - self._set_pipeline_cfg('events', event_cfg) - self._exception_create_pipelinemanager() - - def test_check_publishers_invalid_publisher(self): - publisher_cfg = ['test_invalid'] - self._set_pipeline_cfg('publishers', publisher_cfg) - - def test_multiple_included_events(self): - event_cfg = ['a', 'b'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - - with pipeline_manager.publisher() as p: - p([self.test_event2]) - - self.assertEqual(2, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - self.assertEqual('b', getattr(publisher.events[1], 'event_type')) - - def test_event_non_match(self): - event_cfg = ['nomatch'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(0, len(publisher.events)) - self.assertEqual(0, publisher.calls) - - def test_wildcard_event(self): - event_cfg = ['*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - 
self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_wildcard_excluded_events(self): - event_cfg = ['*', '!a'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) - - def test_wildcard_excluded_events_not_excluded(self): - event_cfg = ['*', '!b'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_all_excluded_events_not_excluded(self): - event_cfg = ['!b', '!c'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_all_excluded_events_excluded(self): - event_cfg = ['!a', '!c'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0].support_event('a')) - self.assertTrue(pipeline_manager.pipelines[0].support_event('b')) - self.assertFalse(pipeline_manager.pipelines[0].support_event('c')) - - def test_wildcard_and_excluded_wildcard_events(self): - event_cfg = ['*', '!compute.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - - def test_included_event_and_wildcard_events(self): - event_cfg = ['compute.instance.create.start', 'identity.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - self.assertTrue(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.stop')) - - def test_excluded_event_and_excluded_wildcard_events(self): - event_cfg = ['!compute.instance.create.start', '!identity.*'] - self._set_pipeline_cfg('events', event_cfg) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('identity.user.create')) - self.assertFalse(pipeline_manager.pipelines[0]. - support_event('compute.instance.create.start')) - self.assertTrue(pipeline_manager.pipelines[0]. 
- support_event('compute.instance.create.stop')) - - def test_multiple_pipeline(self): - self._augment_pipeline_cfg() - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event, self.test_event2]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - self.assertEqual(1, len(publisher.events)) - self.assertEqual(1, publisher.calls) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - new_publisher = pipeline_manager.pipelines[1].publishers[0] - self.assertEqual(1, len(new_publisher.events)) - self.assertEqual(1, new_publisher.calls) - self.assertEqual('b', getattr(new_publisher.events[0], 'event_type')) - - def test_multiple_publisher(self): - self._set_pipeline_cfg('publishers', ['test://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[0] - new_publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.events)) - self.assertEqual(1, len(new_publisher.events)) - self.assertEqual('a', getattr(new_publisher.events[0], 'event_type')) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_multiple_publisher_isolation(self): - self._reraise_exception = False - self._set_pipeline_cfg('publishers', ['except://', 'new://']) - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - with pipeline_manager.publisher() as p: - p([self.test_event]) - - publisher = pipeline_manager.pipelines[0].publishers[1] - self.assertEqual(1, len(publisher.events)) - self.assertEqual('a', getattr(publisher.events[0], 'event_type')) - - def test_unique_pipeline_names(self): - self._dup_pipeline_name_cfg() - self._exception_create_pipelinemanager() - - def test_event_pipeline_endpoint_requeue_on_failure(self): - self.CONF = self.useFixture(fixture_config.Config()).conf - self.CONF([]) - - self.CONF.set_override("ack_on_event_error", False, - group="notification") - self.CONF.set_override("telemetry_secret", "not-so-secret", - group="publisher") - test_data = { - 'message_id': uuid.uuid4(), - 'event_type': 'a', - 'generated': '2013-08-08 21:06:37.803826', - 'traits': [ - {'name': 't_text', - 'value': 1, - 'dtype': 'text_trait' - } - ], - 'raw': {'status': 'started'} - } - message_sign = utils.compute_signature(test_data, 'not-so-secret') - test_data['message_signature'] = message_sign - - fake_publisher = mock.Mock() - self.useFixture(mockpatch.Patch( - 'ceilometer.publisher.test.TestPublisher', - return_value=fake_publisher)) - - pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg, - self.transformer_manager, - self.p_type) - event_pipeline_endpoint = pipeline.EventPipelineEndpoint( - pipeline_manager.pipelines[0]) - - fake_publisher.publish_events.side_effect = Exception - ret = event_pipeline_endpoint.sample([ - {'ctxt': {}, 'publisher_id': 'compute.vagrant-precise', - 'event_type': 'a', 'payload': [test_data], 'metadata': {}}]) - self.assertEqual(oslo_messaging.NotificationResult.REQUEUE, ret) diff --git a/ceilometer/tests/unit/test_messaging.py b/ceilometer/tests/unit/test_messaging.py deleted file mode 100644 index 78595762..00000000 --- a/ceilometer/tests/unit/test_messaging.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under 
the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import fixture as fixture_config -import oslo_messaging.conffixture -from oslotest import base - -from ceilometer import messaging - - -class MessagingTests(base.BaseTestCase): - def setUp(self): - super(MessagingTests, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) - - def test_get_transport_invalid_url(self): - self.assertRaises(oslo_messaging.InvalidTransportURL, - messaging.get_transport, "notvalid!") - - def test_get_transport_url_caching(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://') - self.assertEqual(t1, t2) - - def test_get_transport_default_url_caching(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport() - self.assertEqual(t1, t2) - - def test_get_transport_default_url_no_caching(self): - t1 = messaging.get_transport(cache=False) - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_no_caching(self): - t1 = messaging.get_transport('fake://', cache=False) - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_default_url_caching_mix(self): - t1 = messaging.get_transport() - t2 = messaging.get_transport(cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_url_caching_mix(self): - t1 = messaging.get_transport('fake://') - t2 = messaging.get_transport('fake://', cache=False) - self.assertNotEqual(t1, t2) - - def test_get_transport_optional(self): - self.CONF.set_override('rpc_backend', '') - self.assertIsNone(messaging.get_transport(optional=True, - cache=False)) diff --git a/ceilometer/tests/unit/test_middleware.py b/ceilometer/tests/unit/test_middleware.py deleted file mode 100644 index 85aba8ec..00000000 --- a/ceilometer/tests/unit/test_middleware.py +++ /dev/null @@ -1,100 +0,0 @@ -# -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-import mock -from oslo_config import fixture as fixture_config - -from ceilometer import middleware -from ceilometer.tests import base - - -HTTP_REQUEST = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.request', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - -HTTP_RESPONSE = { - u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2', - u'_context_is_admin': True, - u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e', - u'_context_quota_class': None, - u'_context_read_deleted': u'no', - u'_context_remote_address': u'10.0.2.15', - u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66', - u'_context_roles': [u'admin'], - u'_context_timestamp': u'2012-05-08T20:23:41.425105', - u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2', - u'event_type': u'http.response', - u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451', - u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz', - 'HTTP_X_USER_ID': 'jd-x32', - 'HTTP_X_PROJECT_ID': 'project-id', - 'HTTP_X_SERVICE_NAME': 'nova'}, - u'response': {'status': '200 OK'}}, - u'priority': u'INFO', - u'publisher_id': u'compute.vagrant-precise', - u'timestamp': u'2012-05-08 20:23:48.028195', -} - - -class TestNotifications(base.BaseTestCase): - - def setUp(self): - super(TestNotifications, self).setUp() - self.CONF = self.useFixture(fixture_config.Config()).conf - self.setup_messaging(self.CONF) - - def test_process_request_notification(self): - sample = list(middleware.HTTPRequest(mock.Mock()).process_notification( - HTTP_REQUEST - ))[0] - self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_REQUEST['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_process_response_notification(self): - sample = list(middleware.HTTPResponse( - mock.Mock()).process_notification(HTTP_RESPONSE))[0] - self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'], - sample.user_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_PROJECT_ID'], sample.project_id) - self.assertEqual(HTTP_RESPONSE['payload']['request'] - ['HTTP_X_SERVICE_NAME'], sample.resource_id) - self.assertEqual(1, sample.volume) - - def test_targets(self): - targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF) - self.assertEqual(4, len(targets)) diff --git a/ceilometer/tests/unit/test_neutronclient.py b/ceilometer/tests/unit/test_neutronclient.py deleted file mode 100644 index 4bf61fc3..00000000 --- a/ceilometer/tests/unit/test_neutronclient.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (C) 2014 eNovance SAS -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from oslotest import base - -from ceilometer import neutron_client - - -class TestNeutronClient(base.BaseTestCase): - - def setUp(self): - super(TestNeutronClient, self).setUp() - self.nc = neutron_client.Client() - self.nc.lb_version = 'v1' - - @staticmethod - def fake_ports_list(): - return {'ports': - [{'admin_state_up': True, - 'device_id': '674e553b-8df9-4321-87d9-93ba05b93558', - 'device_owner': 'network:router_gateway', - 'extra_dhcp_opts': [], - 'id': '96d49cc3-4e01-40ce-9cac-c0e32642a442', - 'mac_address': 'fa:16:3e:c5:35:93', - 'name': '', - 'network_id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'status': 'ACTIVE', - 'tenant_id': '89271fa581ab4380bf172f868c3615f9'}, - ]} - - def test_port_get_all(self): - with mock.patch.object(self.nc.client, 'list_ports', - side_effect=self.fake_ports_list): - ports = self.nc.port_get_all() - - self.assertEqual(1, len(ports)) - self.assertEqual('96d49cc3-4e01-40ce-9cac-c0e32642a442', - ports[0]['id']) - - @staticmethod - def fake_networks_list(): - return {'networks': - [{'admin_state_up': True, - 'id': '298a3088-a446-4d5a-bad8-f92ecacd786b', - 'name': 'public', - 'provider:network_type': 'gre', - 'provider:physical_network': None, - 'provider:segmentation_id': 2, - 'router:external': True, - 'shared': False, - 'status': 'ACTIVE', - 'subnets': [u'c4b6f5b8-3508-4896-b238-a441f25fb492'], - 'tenant_id': '62d6f08bbd3a44f6ad6f00ca15cce4e5'}, - ]} - - @staticmethod - def fake_pool_list(): - return {'pools': [{'status': 'ACTIVE', - 'lb_method': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': '', - 'health_monitors': [], - 'members': [], - 'status_description': None, - 'id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'vip_id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'mylb', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'health_monitors_status': []}, - ]} - - def test_pool_list(self): - with mock.patch.object(self.nc.client, 'list_pools', - side_effect=self.fake_pool_list): - pools = self.nc.pool_get_all() - - self.assertEqual(1, len(pools)) - self.assertEqual('ce73ad36-437d-4c84-aee1-186027d3da9a', - pools[0]['id']) - - @staticmethod - def fake_vip_list(): - return {'vips': [{'status': 'ACTIVE', - 'status_description': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a', - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'connection_limit': -1, - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'session_persistence': None, - 'address': '10.0.0.2', - 'protocol_port': 80, - 'port_id': '3df3c4de-b32e-4ca1-a7f4-84323ba5f291', - 'id': 'cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - 'name': 'myvip'}, - ]} - - def test_vip_list(self): - with mock.patch.object(self.nc.client, 'list_vips', - side_effect=self.fake_vip_list): - vips = self.nc.vip_get_all() - - self.assertEqual(1, len(vips)) - self.assertEqual('cd6a6fee-e2fa-4e6c-b3c2-bfbe395752c1', - vips[0]['id']) - - 
@staticmethod - def fake_member_list(): - return {'members': [{'status': 'ACTIVE', - 'protocol_port': 80, - 'weight': 1, - 'admin_state_up': True, - 'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa', - 'pool_id': 'ce73ad36-437d-4c84-aee1-186027d3da9a', - 'address': '10.0.0.3', - 'status_description': None, - 'id': '290b61eb-07bc-4372-9fbf-36459dd0f96b'}, - ]} - - def test_member_list(self): - with mock.patch.object(self.nc.client, 'list_members', - side_effect=self.fake_member_list): - members = self.nc.member_get_all() - - self.assertEqual(1, len(members)) - self.assertEqual('290b61eb-07bc-4372-9fbf-36459dd0f96b', - members[0]['id']) - - @staticmethod - def fake_monitors_list(): - return {'health_monitors': - [{'id': '34ae33e1-0035-49e2-a2ca-77d5d3fab365', - 'admin_state_up': True, - 'tenant_id': "d5d2817dae6b42159be9b665b64beb0e", - 'delay': 2, - 'max_retries': 5, - 'timeout': 5, - 'pools': [], - 'type': 'PING', - }]} - - def test_monitor_list(self): - with mock.patch.object(self.nc.client, 'list_health_monitors', - side_effect=self.fake_monitors_list): - monitors = self.nc.health_monitor_get_all() - - self.assertEqual(1, len(monitors)) - self.assertEqual('34ae33e1-0035-49e2-a2ca-77d5d3fab365', - monitors[0]['id']) - - @staticmethod - def fake_pool_stats(fake_pool): - return {'stats': - [{'active_connections': 1, - 'total_connections': 2, - 'bytes_in': 3, - 'bytes_out': 4 - }]} - - def test_pool_stats(self): - with mock.patch.object(self.nc.client, 'retrieve_pool_stats', - side_effect=self.fake_pool_stats): - stats = self.nc.pool_stats('fake_pool')['stats'] - - self.assertEqual(1, len(stats)) - self.assertEqual(1, stats[0]['active_connections']) - self.assertEqual(2, stats[0]['total_connections']) - self.assertEqual(3, stats[0]['bytes_in']) - self.assertEqual(4, stats[0]['bytes_out']) diff --git a/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py b/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py deleted file mode 100644 index 6b88aa5a..00000000 --- a/ceilometer/tests/unit/test_neutronclient_lbaas_v2.py +++ /dev/null @@ -1,336 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from neutronclient.v2_0 import client -from oslotest import base - -from ceilometer import neutron_client - - -class TestNeutronClientLBaaSV2(base.BaseTestCase): - - def setUp(self): - super(TestNeutronClientLBaaSV2, self).setUp() - self.nc = neutron_client.Client() - - @staticmethod - def fake_list_lbaas_pools(): - return { - 'pools': [{ - 'lb_algorithm': 'ROUND_ROBIN', - 'protocol': 'HTTP', - 'description': 'simple pool', - 'admin_state_up': True, - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'healthmonitor_id': None, - 'listeners': [{ - 'id': "35cb8516-1173-4035-8dae-0dae3453f37f" - } - ], - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858'} - ], - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'name': 'pool1' - }] - } - - @staticmethod - def fake_list_lbaas_members(): - return { - 'members': [{ - 'weight': 1, - 'admin_state_up': True, - 'subnet_id': '013d3059-87a4-45a5-91e9-d721068ae0b2', - 'tenant_id': '1a3e005cf9ce40308c900bcb08e5320c', - 'address': '10.0.0.8', - 'protocol_port': 80, - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858' - }] - } - - @staticmethod - def fake_list_lbaas_healthmonitors(): - return { - 'healthmonitors': [{ - 'admin_state_up': True, - 'tenant_id': '6f3584d5754048a18e30685362b88411', - 'delay': 1, - 'expected_codes': '200,201,202', - 'max_retries': 5, - 'http_method': 'GET', - 'timeout': 1, - 'pools': [{ - 'id': '74aa2010-a59f-4d35-a436-60a6da882819' - }], - 'url_path': '/index.html', - 'type': 'HTTP', - 'id': '0a9ac99d-0a09-4b18-8499-a0796850279a' - }] - } - - @staticmethod - def fake_show_listener(): - return { - 'listener': { - 'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'loadbalancers': [{ - 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' - }], - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'connection_limit': 100, - 'protocol_port': 80, - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': '' - } - } - - @staticmethod - def fake_retrieve_loadbalancer_status(): - return { - 'statuses': { - 'loadbalancer': { - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'listeners': [{ - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', - 'provisioning_status': 'ACTIVE' - } - }] - }] - } - } - } - - @staticmethod - def fake_retrieve_loadbalancer_status_complex(): - return { - 'statuses': { - 'loadbalancer': { - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'listeners': [{ - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf858', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }, - { - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbf969', - 'operating_status': 'OFFLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4518', - 'provisioning_status': 'ACTIVE' - } - }, - { - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6', - 'operating_status': 
'OFFLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfa7a', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b4629', - 'provisioning_status': 'ACTIVE' - } - }] - }, - { - 'id': '35cb8516-1173-4035-8dae-0dae3453f48e', - 'operating_status': 'OFFLINE', - 'provisioning_status': 'ACTIVE', - 'pools': [{ - 'id': '4c0a0a5f-cf8f-44b7-b912-957daa8ce7g7', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE', - 'members': [{ - 'id': 'fcf23bde-8cf9-4616-883f-208cebcbfb8b', - 'operating_status': 'ONLINE', - 'provisioning_status': 'ACTIVE' - }], - 'healthmonitor': { - 'id': '785131d2-8f7b-4fee-a7e7-3196e11b473a', - 'provisioning_status': 'ACTIVE' - } - }] - }] - } - } - } - - @staticmethod - def fake_list_lbaas_listeners(): - return { - 'listeners': [{ - 'default_pool_id': None, - 'protocol': 'HTTP', - 'description': '', - 'admin_state_up': True, - 'loadbalancers': [{ - 'id': 'a9729389-6147-41a3-ab22-a24aed8692b2' - }], - 'tenant_id': '3e4d8bec50a845fcb09e03a4375c691d', - 'connection_limit': 100, - 'protocol_port': 80, - 'id': '35cb8516-1173-4035-8dae-0dae3453f37f', - 'name': 'listener_one' - }]} - - @mock.patch.object(client.Client, - 'list_lbaas_pools') - @mock.patch.object(client.Client, - 'show_listener') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_pools_v2(self, mock_status, mock_show, mock_list): - mock_status.return_value = self.fake_retrieve_loadbalancer_status() - mock_show.return_value = self.fake_show_listener() - mock_list.return_value = self.fake_list_lbaas_pools() - pools = self.nc.list_pools_v2() - self.assertEqual(1, len(pools)) - for pool in pools: - self.assertEqual('ONLINE', pool['status']) - self.assertEqual('ROUND_ROBIN', pool['lb_method']) - - @mock.patch.object(client.Client, - 'list_lbaas_pools') - @mock.patch.object(client.Client, - 'list_lbaas_members') - @mock.patch.object(client.Client, - 'show_listener') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_members_v2(self, mock_status, mock_show, mock_list_members, - mock_list_pools): - mock_status.return_value = self.fake_retrieve_loadbalancer_status() - mock_show.return_value = self.fake_show_listener() - mock_list_pools.return_value = self.fake_list_lbaas_pools() - mock_list_members.return_value = self.fake_list_lbaas_members() - members = self.nc.list_members_v2() - self.assertEqual(1, len(members)) - for member in members: - self.assertEqual('ONLINE', member['status']) - self.assertEqual('4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - member['pool_id']) - - @mock.patch.object(client.Client, - 'list_lbaas_healthmonitors') - def test_list_health_monitors_v2(self, mock_list_healthmonitors): - mock_list_healthmonitors.return_value = ( - self.fake_list_lbaas_healthmonitors()) - healthmonitors = self.nc.list_health_monitors_v2() - self.assertEqual(1, len(healthmonitors)) - for healthmonitor in healthmonitors: - self.assertEqual(5, healthmonitor['max_retries']) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_member_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - listener_id = '35cb8516-1173-4035-8dae-0dae3453f37f' - pool_id = '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5' - parent_id = [listener_id, pool_id] - result_status 
= self.nc._get_member_status(loadbalancer_id, - parent_id) - expected_keys = ['fcf23bde-8cf9-4616-883f-208cebcbf858', - 'fcf23bde-8cf9-4616-883f-208cebcbf969'] - excepted_status = { - 'fcf23bde-8cf9-4616-883f-208cebcbf858': 'ONLINE', - 'fcf23bde-8cf9-4616-883f-208cebcbf969': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_pool_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - parent_id = '35cb8516-1173-4035-8dae-0dae3453f37f' - result_status = self.nc._get_pool_status(loadbalancer_id, - parent_id) - expected_keys = ['4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5', - '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6'] - excepted_status = { - '4c0a0a5f-cf8f-44b7-b912-957daa8ce5e5': 'ONLINE', - '4c0a0a5f-cf8f-44b7-b912-957daa8ce6f6': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_get_listener_status(self, mock_status): - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status_complex()) - loadbalancer_id = '5b1b1b6e-cf8f-44b7-b912-957daa8ce5e5' - result_status = self.nc._get_listener_status(loadbalancer_id) - expected_keys = ['35cb8516-1173-4035-8dae-0dae3453f37f', - '35cb8516-1173-4035-8dae-0dae3453f48e'] - excepted_status = { - '35cb8516-1173-4035-8dae-0dae3453f37f': 'ONLINE', - '35cb8516-1173-4035-8dae-0dae3453f48e': 'OFFLINE'} - - for key in result_status.keys(): - self.assertIn(key, expected_keys) - self.assertEqual(excepted_status[key], result_status[key]) - - @mock.patch.object(client.Client, - 'list_listeners') - @mock.patch.object(neutron_client.Client, - '_retrieve_loadbalancer_status_tree') - def test_list_listener(self, mock_status, mock_list_listeners): - mock_list_listeners.return_value = ( - self.fake_list_lbaas_listeners()) - mock_status.return_value = ( - self.fake_retrieve_loadbalancer_status()) - listeners = self.nc.list_listener() - expected_key = '35cb8516-1173-4035-8dae-0dae3453f37f' - expected_status = 'ONLINE' - self.assertEqual(1, len(listeners)) - self.assertEqual(expected_key, listeners[0]['id']) - self.assertEqual(expected_status, listeners[0]['operating_status']) diff --git a/ceilometer/tests/unit/test_novaclient.py b/ceilometer/tests/unit/test_novaclient.py deleted file mode 100644 index cb3fc847..00000000 --- a/ceilometer/tests/unit/test_novaclient.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright 2013-2014 eNovance -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import novaclient -from oslo_config import fixture as fixture_config -from oslotest import base -from oslotest import mockpatch - -from ceilometer import nova_client - - -class TestNovaClient(base.BaseTestCase): - - def setUp(self): - super(TestNovaClient, self).setUp() - self._flavors_count = 0 - self._images_count = 0 - self.nv = nova_client.Client() - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.flavors, 'get', - side_effect=self.fake_flavors_get)) - self.useFixture(mockpatch.PatchObject( - self.nv.nova_client.images, 'get', - side_effect=self.fake_images_get)) - self.CONF = self.useFixture(fixture_config.Config()).conf - - def fake_flavors_get(self, *args, **kwargs): - self._flavors_count += 1 - a = mock.MagicMock() - a.id = args[0] - if a.id == 1: - a.name = 'm1.tiny' - elif a.id == 2: - a.name = 'm1.large' - else: - raise novaclient.exceptions.NotFound('foobar') - return a - - def fake_images_get(self, *args, **kwargs): - self._images_count += 1 - a = mock.MagicMock() - a.id = args[0] - image_details = { - 1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)), - 2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)), - 3: ('rhel-6-x64', None), - 4: ('rhel-6-x64', dict()), - 5: ('rhel-6-x64', dict(kernel_id=11)), - 6: ('rhel-6-x64', dict(ramdisk_id=21)) - } - - if a.id in image_details: - a.name = image_details[a.id][0] - a.metadata = image_details[a.id][1] - else: - raise novaclient.exceptions.NotFound('foobar') - - return a - - @staticmethod - def fake_servers_list(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 1} - b = mock.MagicMock() - b.id = 43 - b.flavor = {'id': 2} - b.image = {'id': 2} - return [a, b] - - def test_instance_get_all_by_host(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(2, len(instances)) - self.assertEqual('m1.tiny', instances[0].flavor['name']) - self.assertEqual('ubuntu-12.04-x86', instances[0].image['name']) - self.assertEqual(11, instances[0].kernel_id) - self.assertEqual(21, instances[0].ramdisk_id) - - def test_instance_get_all(self): - with mock.patch.object(self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list): - instances = self.nv.instance_get_all() - - self.assertEqual(2, len(instances)) - self.assertEqual(42, instances[0].id) - self.assertEqual(1, instances[0].flavor['id']) - self.assertEqual(1, instances[0].image['id']) - - @staticmethod - def fake_servers_list_unknown_flavor(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = {'id': 1} - return [a] - - def test_instance_get_all_by_host_unknown_flavor(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_flavor): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].flavor['name']) - - @staticmethod - def fake_servers_list_unknown_image(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': 666} - return [a] - - @staticmethod - def fake_servers_list_image_missing_metadata(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 1} - a.image = {'id': args[0]} - return [a] - - @staticmethod - def fake_instance_image_missing(*args, **kwargs): - a = mock.MagicMock() - a.id = 42 - a.flavor = {'id': 666} - a.image = 
None - return [a] - - def test_instance_get_all_by_host_unknown_image(self): - with mock.patch.object( - self.nv.nova_client.servers, 'list', - side_effect=self.fake_servers_list_unknown_image): - instances = self.nv.instance_get_all_by_host('foobar') - - self.assertEqual(1, len(instances)) - self.assertEqual('unknown-id-666', instances[0].image['name']) - - def test_with_flavor_and_image(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - instance = results[0] - self.assertEqual(2, len(results)) - self.assertEqual('ubuntu-12.04-x86', instance.image['name']) - self.assertEqual('m1.tiny', instance.flavor['name']) - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_image(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_unknown_flavor(self): - instances = self.fake_servers_list_unknown_flavor() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual('unknown-id-666', instance.flavor['name']) - self.assertEqual(0, instance.flavor['vcpus']) - self.assertEqual(0, instance.flavor['ram']) - self.assertEqual(0, instance.flavor['disk']) - self.assertNotEqual(instance.image['name'], 'unknown-id-666') - self.assertEqual(11, instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_none_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(3) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_metadata(self): - instances = self.fake_servers_list_image_missing_metadata(4) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_ramdisk(self): - instances = self.fake_servers_list_image_missing_metadata(5) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertEqual(11, instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_flavor_and_image_missing_kernel(self): - instances = self.fake_servers_list_image_missing_metadata(6) - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertEqual(21, instance.ramdisk_id) - - def test_with_flavor_and_image_no_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list()) - self.assertEqual(2, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_cache(self): - results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2) - self.assertEqual(4, len(results)) - self.assertEqual(2, self._flavors_count) - self.assertEqual(2, self._images_count) - - def test_with_flavor_and_image_unknown_image_cache(self): - instances = self.fake_servers_list_unknown_image() - results = self.nv._with_flavor_and_image(instances * 2) - self.assertEqual(2, len(results)) - self.assertEqual(1, self._flavors_count) - self.assertEqual(1, 
self._images_count) - for instance in results: - self.assertEqual('unknown-id-666', instance.image['name']) - self.assertNotEqual(instance.flavor['name'], 'unknown-id-666') - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.ramdisk_id) - - def test_with_missing_image_instance(self): - instances = self.fake_instance_image_missing() - results = self.nv._with_flavor_and_image(instances) - instance = results[0] - self.assertIsNone(instance.kernel_id) - self.assertIsNone(instance.image) - self.assertIsNone(instance.ramdisk_id) - - def test_with_nova_http_log_debug(self): - self.CONF.set_override("nova_http_log_debug", True) - self.nv = nova_client.Client() - self.assertIsNotNone(self.nv.nova_client.client.logger) diff --git a/ceilometer/tests/unit/test_sample.py b/ceilometer/tests/unit/test_sample.py deleted file mode 100644 index b64e6709..00000000 --- a/ceilometer/tests/unit/test_sample.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Tests for ceilometer/sample.py""" - -import datetime - -from ceilometer import sample -from ceilometer.tests import base - - -class TestSample(base.BaseTestCase): - SAMPLE = sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume='1234567', - user_id='56c5692032f34041900342503fecab30', - project_id='ac9494df2d9d4e709bac378cceabaf23', - resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', - timestamp=datetime.datetime(2014, 10, 29, 14, 12, 15, 485877), - resource_metadata={} - ) - - def test_sample_string_format(self): - expected = ('') - self.assertEqual(expected, str(self.SAMPLE)) - - def test_sample_from_notifications_list(self): - msg = { - 'event_type': u'sample.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': [{u'counter_name': u'instance100'}], - 'priority': 'info', - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' - } - s = sample.Sample.from_notification( - 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) - expected = {'event_type': msg['event_type'], - 'host': msg['publisher_id']} - self.assertEqual(expected, s.resource_metadata) - - def test_sample_from_notifications_dict(self): - msg = { - 'event_type': u'sample.create', - 'timestamp': u'2015-06-1909: 19: 35.786893', - 'payload': {u'counter_name': u'instance100'}, - 'priority': 'info', - 'publisher_id': u'ceilometer.api', - 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e' - } - s = sample.Sample.from_notification( - 'sample', 'type', 1.0, '%', 'user', 'project', 'res', msg) - msg['payload']['event_type'] = msg['event_type'] - msg['payload']['host'] = msg['publisher_id'] - self.assertEqual(msg['payload'], s.resource_metadata) diff --git a/ceilometer/tests/unit/test_utils.py b/ceilometer/tests/unit/test_utils.py index 429313e8..769b8bac 100644 --- a/ceilometer/tests/unit/test_utils.py +++ b/ceilometer/tests/unit/test_utils.py @@ -84,41 +84,6 @@ class TestUtils(base.BaseTestCase): else: self.assertIn((k, v), expected) - def 
test_restore_nesting_unested(self): - metadata = {'a': 'A', 'b': 'B'} - unwound = utils.restore_nesting(metadata) - self.assertIs(metadata, unwound) - - def test_restore_nesting(self): - metadata = {'a': 'A', 'b': 'B', - 'nested:a': 'A', - 'nested:b': 'B', - 'nested:twice:c': 'C', - 'nested:twice:d': 'D', - 'embedded:e': 'E'} - unwound = utils.restore_nesting(metadata) - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - - def test_restore_nesting_with_separator(self): - metadata = {'a': 'A', 'b': 'B', - 'nested.a': 'A', - 'nested.b': 'B', - 'nested.twice.c': 'C', - 'nested.twice.d': 'D', - 'embedded.e': 'E'} - unwound = utils.restore_nesting(metadata, separator='.') - expected = {'a': 'A', 'b': 'B', - 'nested': {'a': 'A', 'b': 'B', - 'twice': {'c': 'C', 'd': 'D'}}, - 'embedded': {'e': 'E'}} - self.assertEqual(expected, unwound) - self.assertIsNot(metadata, unwound) - def test_decimal_to_dt_with_none_parameter(self): self.assertIsNone(utils.decimal_to_dt(None)) @@ -138,43 +103,3 @@ class TestUtils(base.BaseTestCase): ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) - - def test_hash_of_set(self): - x = ['a', 'b'] - y = ['a', 'b', 'a'] - z = ['a', 'c'] - self.assertEqual(utils.hash_of_set(x), utils.hash_of_set(y)) - self.assertNotEqual(utils.hash_of_set(x), utils.hash_of_set(z)) - self.assertNotEqual(utils.hash_of_set(y), utils.hash_of_set(z)) - - def test_hash_ring(self): - num_nodes = 10 - num_keys = 1000 - - nodes = [str(x) for x in range(num_nodes)] - hr = utils.HashRing(nodes) - - buckets = [0] * num_nodes - assignments = [-1] * num_keys - for k in range(num_keys): - n = int(hr.get_node(str(k))) - self.assertTrue(0 <= n <= num_nodes) - buckets[n] += 1 - assignments[k] = n - - # at least something in each bucket - self.assertTrue(all((c > 0 for c in buckets))) - - # approximately even distribution - diff = max(buckets) - min(buckets) - self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) - - # consistency - num_nodes += 1 - nodes.append(str(num_nodes + 1)) - hr = utils.HashRing(nodes) - for k in range(num_keys): - n = int(hr.get_node(str(k))) - assignments[k] -= n - reassigned = len([c for c in assignments if c != 0]) - self.assertTrue(reassigned < num_keys / num_nodes) diff --git a/ceilometer/tests/unit/transformer/__init__.py b/ceilometer/tests/unit/transformer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ceilometer/tests/unit/transformer/test_conversions.py b/ceilometer/tests/unit/transformer/test_conversions.py deleted file mode 100644 index 702f0f67..00000000 --- a/ceilometer/tests/unit/transformer/test_conversions.py +++ /dev/null @@ -1,114 +0,0 @@ -# -# Copyright 2016 IBM Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
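The test_utils.py hunk above also drops the tests for restore_nesting, and the helper itself is removed from ceilometer/utils.py later in this patch. For anyone who still depends on it, a condensed sketch of the behaviour those tests pinned down (unwinding separator-flattened keys back into nested dicts):

    import copy


    def restore_nesting(d, separator=':'):
        # Condensed copy of the removed helper: unwind a flattened dict
        # so {'nested:twice:c': 'C'} becomes {'nested': {'twice': {'c': 'C'}}}.
        # Returns the input object untouched when no key contains the
        # separator, matching the removed assertIs() test.
        if not any(separator in k for k in d):
            return d
        d = copy.copy(d)
        for k, v in list(d.items()):
            if separator in k:
                top, rem = k.split(separator, 1)
                nest = d[top] if isinstance(d.get(top), dict) else {}
                nest[rem] = v
                d[top] = restore_nesting(nest, separator)
                del d[k]
        return d


    assert restore_nesting({'a': 'A', 'nested:twice:c': 'C'}) == \
        {'a': 'A', 'nested': {'twice': {'c': 'C'}}}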
-import copy - -from oslo_utils import timeutils -from oslotest import base - -from ceilometer import sample -from ceilometer.transformer import conversions - - -class AggregatorTransformerTestCase(base.BaseTestCase): - SAMPLE = sample.Sample( - name='cpu', - type=sample.TYPE_CUMULATIVE, - unit='ns', - volume='1234567', - user_id='56c5692032f34041900342503fecab30', - project_id='ac9494df2d9d4e709bac378cceabaf23', - resource_id='1ca738a1-c49c-4401-8346-5c60ebdb03f4', - timestamp="2015-10-29 14:12:15.485877+00:00", - resource_metadata={} - ) - - def setUp(self): - super(AggregatorTransformerTestCase, self).setUp() - self._sample_offset = 0 - - def test_init_input_validation(self): - aggregator = conversions.AggregatorTransformer("2", "15", None, - None, None) - self.assertEqual(2, aggregator.size) - self.assertEqual(15, aggregator.retention_time) - - def test_init_no_size_or_rention_time(self): - aggregator = conversions.AggregatorTransformer() - self.assertEqual(1, aggregator.size) - self.assertIsNone(aggregator.retention_time) - - def test_init_size_zero(self): - aggregator = conversions.AggregatorTransformer(size="0") - self.assertEqual(1, aggregator.size) - self.assertIsNone(aggregator.retention_time) - - def test_init_input_validation_size_invalid(self): - self.assertRaises(ValueError, conversions.AggregatorTransformer, - "abc", "15", None, None, None) - - def test_init_input_validation_retention_time_invalid(self): - self.assertRaises(ValueError, conversions.AggregatorTransformer, - "2", "abc", None, None, None) - - def test_init_no_timestamp(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None) - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_none(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, None) - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_first(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, "first") - self.assertEqual("first", aggregator.timestamp) - - def test_init_timestamp_last(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, "last") - self.assertEqual("last", aggregator.timestamp) - - def test_init_timestamp_invalid(self): - aggregator = conversions.AggregatorTransformer("1", "1", None, - None, None, - "invalid_option") - self.assertEqual("first", aggregator.timestamp) - - def test_size_unbounded(self): - aggregator = conversions.AggregatorTransformer(size="0", - retention_time="300") - self._insert_sample_data(aggregator) - - samples = aggregator.flush() - - self.assertEqual([], samples) - - def test_size_bounded(self): - aggregator = conversions.AggregatorTransformer(size="100") - self._insert_sample_data(aggregator) - - samples = aggregator.flush() - - self.assertEqual(100, len(samples)) - - def _insert_sample_data(self, aggregator): - for _ in range(100): - sample = copy.copy(self.SAMPLE) - sample.resource_id = sample.resource_id + str(self._sample_offset) - sample.timestamp = timeutils.isotime() - aggregator.handle_sample(sample) - self._sample_offset += 1 diff --git a/ceilometer/transformer/__init__.py b/ceilometer/transformer/__init__.py deleted file mode 100644 index 48d78b4d..00000000 --- a/ceilometer/transformer/__init__.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# Copyright 2013 Intel Corp. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc -import collections - -import six - - -@six.add_metaclass(abc.ABCMeta) -class TransformerBase(object): - """Base class for plugins that transform the sample.""" - - def __init__(self, **kwargs): - """Setup transformer. - - Each time a transformed is involved in a pipeline, a new transformer - instance is created and chained into the pipeline. i.e. transformer - instance is per pipeline. This helps if transformer need keep some - cache and per-pipeline information. - - :param kwargs: The parameters that are defined in pipeline config file. - """ - super(TransformerBase, self).__init__() - - @abc.abstractmethod - def handle_sample(self, sample): - """Transform a sample. - - :param sample: A sample. - """ - - @abc.abstractproperty - def grouping_keys(self): - """Keys used to group transformer.""" - - @staticmethod - def flush(): - """Flush samples cached previously.""" - return [] - - -class Namespace(object): - """Encapsulates the namespace. - - Encapsulation is done by wrapping the evaluation of the configured rule. - This allows nested dicts to be accessed in the attribute style, - and missing attributes to yield false when used in a boolean expression. - """ - def __init__(self, seed): - self.__dict__ = collections.defaultdict(lambda: Namespace({})) - self.__dict__.update(seed) - for k, v in six.iteritems(self.__dict__): - if isinstance(v, dict): - self.__dict__[k] = Namespace(v) - - def __getattr__(self, attr): - return self.__dict__[attr] - - def __getitem__(self, key): - return self.__dict__[key] - - def __nonzero__(self): - return len(self.__dict__) > 0 - __bool__ = __nonzero__ diff --git a/ceilometer/transformer/accumulator.py b/ceilometer/transformer/accumulator.py deleted file mode 100644 index 1e14497c..00000000 --- a/ceilometer/transformer/accumulator.py +++ /dev/null @@ -1,44 +0,0 @@ -# -# Copyright 2013 Julien Danjou -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from ceilometer import transformer - - -class TransformerAccumulator(transformer.TransformerBase): - """Transformer that accumulates samples until a threshold. - - And then flushes them out into the wild. 
- """ - - grouping_keys = ['resource_id'] - - def __init__(self, size=1, **kwargs): - if size >= 1: - self.samples = [] - self.size = size - super(TransformerAccumulator, self).__init__(**kwargs) - - def handle_sample(self, sample): - if self.size >= 1: - self.samples.append(sample) - else: - return sample - - def flush(self): - if len(self.samples) >= self.size: - x = self.samples - self.samples = [] - return x - return [] diff --git a/ceilometer/transformer/arithmetic.py b/ceilometer/transformer/arithmetic.py deleted file mode 100644 index d0bbccb7..00000000 --- a/ceilometer/transformer/arithmetic.py +++ /dev/null @@ -1,156 +0,0 @@ -# -# Copyright 2014 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import keyword -import math -import re - -from oslo_log import log -import six - -from ceilometer.i18n import _ -from ceilometer import sample -from ceilometer import transformer - -LOG = log.getLogger(__name__) - - -class ArithmeticTransformer(transformer.TransformerBase): - """Multi meter arithmetic transformer. - - Transformer that performs arithmetic operations - over one or more meters and/or their metadata. - """ - - grouping_keys = ['resource_id'] - - meter_name_re = re.compile(r'\$\(([\w\.\-]+)\)') - - def __init__(self, target=None, **kwargs): - super(ArithmeticTransformer, self).__init__(**kwargs) - target = target or {} - self.target = target - self.expr = target.get('expr', '') - self.expr_escaped, self.escaped_names = self.parse_expr(self.expr) - self.required_meters = list(self.escaped_names.values()) - self.misconfigured = len(self.required_meters) == 0 - if not self.misconfigured: - self.reference_meter = self.required_meters[0] - # convert to set for more efficient contains operation - self.required_meters = set(self.required_meters) - self.cache = collections.defaultdict(dict) - self.latest_timestamp = None - else: - LOG.warning(_('Arithmetic transformer must use at least one' - ' meter in expression \'%s\''), self.expr) - - def _update_cache(self, _sample): - """Update the cache with the latest sample.""" - escaped_name = self.escaped_names.get(_sample.name, '') - if escaped_name not in self.required_meters: - return - self.cache[_sample.resource_id][escaped_name] = _sample - - def _check_requirements(self, resource_id): - """Check if all the required meters are available in the cache.""" - return len(self.cache[resource_id]) == len(self.required_meters) - - def _calculate(self, resource_id): - """Evaluate the expression and return a new sample if successful.""" - ns_dict = dict((m, s.as_dict()) for m, s - in six.iteritems(self.cache[resource_id])) - ns = transformer.Namespace(ns_dict) - try: - new_volume = eval(self.expr_escaped, {}, ns) - if math.isnan(new_volume): - raise ArithmeticError(_('Expression evaluated to ' - 'a NaN value!')) - - reference_sample = self.cache[resource_id][self.reference_meter] - return sample.Sample( - name=self.target.get('name', reference_sample.name), - unit=self.target.get('unit', reference_sample.unit), - 
type=self.target.get('type', reference_sample.type), - volume=float(new_volume), - user_id=reference_sample.user_id, - project_id=reference_sample.project_id, - resource_id=reference_sample.resource_id, - timestamp=self.latest_timestamp, - resource_metadata=reference_sample.resource_metadata - ) - except Exception as e: - LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'), - {'expr': self.expr, 'exc': e}) - - def handle_sample(self, _sample): - self._update_cache(_sample) - self.latest_timestamp = _sample.timestamp - - def flush(self): - new_samples = [] - cache_clean_list = [] - if not self.misconfigured: - for resource_id in self.cache: - if self._check_requirements(resource_id): - new_samples.append(self._calculate(resource_id)) - cache_clean_list.append(resource_id) - for res_id in cache_clean_list: - self.cache.pop(res_id) - return new_samples - - @classmethod - def parse_expr(cls, expr): - """Transforms meter names in the expression into valid identifiers. - - :param expr: unescaped expression - :return: A tuple of the escaped expression and a dict representing - the translation of meter names into Python identifiers - """ - - class Replacer(object): - """Replaces matched meter names with escaped names. - - If the meter name is not followed by parameter access in the - expression, it defaults to accessing the 'volume' parameter. - """ - - def __init__(self, original_expr): - self.original_expr = original_expr - self.escaped_map = {} - - def __call__(self, match): - meter_name = match.group(1) - escaped_name = self.escape(meter_name) - self.escaped_map[meter_name] = escaped_name - - if (match.end(0) == len(self.original_expr) or - self.original_expr[match.end(0)] != '.'): - escaped_name += '.volume' - return escaped_name - - @staticmethod - def escape(name): - has_dot = '.' in name - if has_dot: - name = name.replace('.', '_') - - if has_dot or name.endswith('ESC') or name in keyword.kwlist: - name = "_" + name + '_ESC' - return name - - replacer = Replacer(expr) - expr = re.sub(cls.meter_name_re, replacer, expr) - return expr, replacer.escaped_map diff --git a/ceilometer/transformer/conversions.py b/ceilometer/transformer/conversions.py deleted file mode 100644 index f4ea252e..00000000 --- a/ceilometer/transformer/conversions.py +++ /dev/null @@ -1,340 +0,0 @@ -# -# Copyright 2013 Red Hat, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import collections -import re - -from oslo_log import log -from oslo_utils import timeutils -import six - -from ceilometer.i18n import _, _LW -from ceilometer import sample -from ceilometer import transformer - -LOG = log.getLogger(__name__) - - -class BaseConversionTransformer(transformer.TransformerBase): - """Transformer to derive conversion.""" - - grouping_keys = ['resource_id'] - - def __init__(self, source=None, target=None, **kwargs): - """Initialize transformer with configured parameters. 
- - :param source: dict containing source sample unit - :param target: dict containing target sample name, type, - unit and scaling factor (a missing value - connotes no change) - """ - source = source or {} - target = target or {} - self.source = source - self.target = target - super(BaseConversionTransformer, self).__init__(**kwargs) - - def _map(self, s, attr): - """Apply the name or unit mapping if configured.""" - mapped = None - from_ = self.source.get('map_from') - to_ = self.target.get('map_to') - if from_ and to_: - if from_.get(attr) and to_.get(attr): - try: - mapped = re.sub(from_[attr], to_[attr], getattr(s, attr)) - except Exception: - pass - return mapped or self.target.get(attr, getattr(s, attr)) - - -class DeltaTransformer(BaseConversionTransformer): - """Transformer based on the delta of a sample volume.""" - - def __init__(self, target=None, growth_only=False, **kwargs): - """Initialize transformer with configured parameters. - - :param growth_only: capture only positive deltas - """ - super(DeltaTransformer, self).__init__(target=target, **kwargs) - self.growth_only = growth_only - self.cache = {} - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - key = s.name + s.resource_id - prev = self.cache.get(key) - timestamp = timeutils.parse_isotime(s.timestamp) - self.cache[key] = (s.volume, timestamp) - - if prev: - prev_volume = prev[0] - prev_timestamp = prev[1] - time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) - # disallow violations of the arrow of time - if time_delta < 0: - LOG.warning(_LW('Dropping out of time order sample: %s'), (s,)) - # Reset the cache to the newer sample. - self.cache[key] = prev - return None - volume_delta = s.volume - prev_volume - if self.growth_only and volume_delta < 0: - LOG.warning(_LW('Negative delta detected, dropping value')) - s = None - else: - s = self._convert(s, volume_delta) - LOG.debug('Converted to: %s', s) - else: - LOG.warning(_LW('Dropping sample with no predecessor: %s'), (s,)) - s = None - return s - - def _convert(self, s, delta): - """Transform the appropriate sample fields.""" - return sample.Sample( - name=self._map(s, 'name'), - unit=s.unit, - type=sample.TYPE_DELTA, - volume=delta, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - resource_metadata=s.resource_metadata - ) - - -class ScalingTransformer(BaseConversionTransformer): - """Transformer to apply a scaling conversion.""" - - def __init__(self, source=None, target=None, **kwargs): - """Initialize transformer with configured parameters. - - :param source: dict containing source sample unit - :param target: dict containing target sample name, type, - unit and scaling factor (a missing value - connotes no change) - """ - super(ScalingTransformer, self).__init__(source=source, target=target, - **kwargs) - self.scale = self.target.get('scale') - LOG.debug('scaling conversion transformer with source:' - ' %(source)s target: %(target)s:', {'source': self.source, - 'target': self.target}) - - def _scale(self, s): - """Apply the scaling factor. - - Either a straight multiplicative factor or else a string to be eval'd. 
- """ - ns = transformer.Namespace(s.as_dict()) - - scale = self.scale - return ((eval(scale, {}, ns) if isinstance(scale, six.string_types) - else s.volume * scale) if scale else s.volume) - - def _convert(self, s, growth=1): - """Transform the appropriate sample fields.""" - return sample.Sample( - name=self._map(s, 'name'), - unit=self._map(s, 'unit'), - type=self.target.get('type', s.type), - volume=self._scale(s) * growth, - user_id=s.user_id, - project_id=s.project_id, - resource_id=s.resource_id, - timestamp=s.timestamp, - resource_metadata=s.resource_metadata - ) - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - LOG.debug('handling sample %s', s) - if self.source.get('unit', s.unit) == s.unit: - s = self._convert(s) - LOG.debug('converted to: %s', s) - return s - - -class RateOfChangeTransformer(ScalingTransformer): - """Transformer based on the rate of change of a sample volume. - - For example, taking the current and previous volumes of a cumulative sample - and producing a gauge value based on the proportion of some maximum used. - """ - - def __init__(self, **kwargs): - """Initialize transformer with configured parameters.""" - super(RateOfChangeTransformer, self).__init__(**kwargs) - self.cache = {} - self.scale = self.scale or '1' - - def handle_sample(self, s): - """Handle a sample, converting if necessary.""" - LOG.debug('handling sample %s', s) - key = s.name + s.resource_id - prev = self.cache.get(key) - timestamp = timeutils.parse_isotime(s.timestamp) - self.cache[key] = (s.volume, timestamp) - - if prev: - prev_volume = prev[0] - prev_timestamp = prev[1] - time_delta = timeutils.delta_seconds(prev_timestamp, timestamp) - # disallow violations of the arrow of time - if time_delta < 0: - LOG.warning(_('dropping out of time order sample: %s'), (s,)) - # Reset the cache to the newer sample. - self.cache[key] = prev - return None - # we only allow negative volume deltas for noncumulative - # samples, whereas for cumulative we assume that a reset has - # occurred in the interim so that the current volume gives a - # lower bound on growth - volume_delta = (s.volume - prev_volume - if (prev_volume <= s.volume or - s.type != sample.TYPE_CUMULATIVE) - else s.volume) - rate_of_change = ((1.0 * volume_delta / time_delta) - if time_delta else 0.0) - - s = self._convert(s, rate_of_change) - LOG.debug('converted to: %s', s) - else: - LOG.warning(_('dropping sample with no predecessor: %s'), - (s,)) - s = None - return s - - -class AggregatorTransformer(ScalingTransformer): - """Transformer that aggregates samples. - - Aggregation goes until a threshold or/and a retention_time, and then - flushes them out into the wild. - - Example: - To aggregate sample by resource_metadata and keep the - resource_metadata of the latest received sample; - - AggregatorTransformer(retention_time=60, resource_metadata='last') - - To aggregate sample by user_id and resource_metadata and keep the - user_id of the first received sample and drop the resource_metadata. 
- - AggregatorTransformer(size=15, user_id='first', - resource_metadata='drop') - - To keep the timestamp of the last received sample rather - than the first: - - AggregatorTransformer(timestamp="last") - - """ - - def __init__(self, size=1, retention_time=None, - project_id=None, user_id=None, resource_metadata="last", - timestamp="first", **kwargs): - super(AggregatorTransformer, self).__init__(**kwargs) - self.samples = {} - self.counts = collections.defaultdict(int) - self.size = int(size) if size else None - self.retention_time = float(retention_time) if retention_time else None - if not (self.size or self.retention_time): - self.size = 1 - - if timestamp in ["first", "last"]: - self.timestamp = timestamp - else: - self.timestamp = "first" - - self.initial_timestamp = None - self.aggregated_samples = 0 - - self.key_attributes = [] - self.merged_attribute_policy = {} - - self._init_attribute('project_id', project_id) - self._init_attribute('user_id', user_id) - self._init_attribute('resource_metadata', resource_metadata, - is_droppable=True, mandatory=True) - - def _init_attribute(self, name, value, is_droppable=False, - mandatory=False): - drop = ['drop'] if is_droppable else [] - if value or mandatory: - if value not in ['last', 'first'] + drop: - LOG.warning('%s is unknown (%s), using last' % (name, value)) - value = 'last' - self.merged_attribute_policy[name] = value - else: - self.key_attributes.append(name) - - def _get_unique_key(self, s): - # NOTE(arezmerita): in samples generated by ceilometer middleware, - # when accessing without authentication publicly readable/writable - # swift containers, the project_id and the user_id are missing. - # They will be replaced by for unique key construction. - keys = ['' if getattr(s, f) is None else getattr(s, f) - for f in self.key_attributes] - non_aggregated_keys = "-".join(keys) - # NOTE(sileht): it assumes, a meter always have the same unit/type - return "%s-%s-%s" % (s.name, s.resource_id, non_aggregated_keys) - - def handle_sample(self, sample_): - if not self.initial_timestamp: - self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp) - - self.aggregated_samples += 1 - key = self._get_unique_key(sample_) - self.counts[key] += 1 - if key not in self.samples: - self.samples[key] = self._convert(sample_) - if self.merged_attribute_policy[ - 'resource_metadata'] == 'drop': - self.samples[key].resource_metadata = {} - else: - if self.timestamp == "last": - self.samples[key].timestamp = sample_.timestamp - if sample_.type == sample.TYPE_CUMULATIVE: - self.samples[key].volume = self._scale(sample_) - else: - self.samples[key].volume += self._scale(sample_) - for field in self.merged_attribute_policy: - if self.merged_attribute_policy[field] == 'last': - setattr(self.samples[key], field, - getattr(sample_, field)) - - def flush(self): - if not self.initial_timestamp: - return [] - - expired = (self.retention_time and - timeutils.is_older_than(self.initial_timestamp, - self.retention_time)) - full = self.size and self.aggregated_samples >= self.size - if full or expired: - x = list(self.samples.values()) - # gauge aggregates need to be averages - for s in x: - if s.type == sample.TYPE_GAUGE: - key = self._get_unique_key(s) - s.volume /= self.counts[key] - self.samples.clear() - self.counts.clear() - self.aggregated_samples = 0 - self.initial_timestamp = None - return x - return [] diff --git a/ceilometer/utils.py b/ceilometer/utils.py index a4495da7..244490fc 100644 --- a/ceilometer/utils.py +++ b/ceilometer/utils.py @@ -18,45 
+18,16 @@ """Utilities and helper functions.""" -import bisect import calendar import copy import datetime import decimal -import hashlib -import struct -import threading -from oslo_concurrency import processutils -from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import units import six -OPTS = [ - cfg.StrOpt('rootwrap_config', - default="/etc/ceilometer/rootwrap.conf", - help='Path to the rootwrap configuration file to' - 'use for running commands as root'), -] -CONF = cfg.CONF -CONF.register_opts(OPTS) - -EPOCH_TIME = datetime.datetime(1970, 1, 1) - - -def _get_root_helper(): - return 'sudo ceilometer-rootwrap %s' % CONF.rootwrap_config - - -def execute(*cmd, **kwargs): - """Convenience wrapper around oslo's execute() method.""" - if 'run_as_root' in kwargs and 'root_helper' not in kwargs: - kwargs['root_helper'] = _get_root_helper() - return processutils.execute(*cmd, **kwargs) - - def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): @@ -93,19 +64,6 @@ def recursive_keypairs(d, separator=':'): yield name, value -def restore_nesting(d, separator=':'): - """Unwinds a flattened dict to restore nesting.""" - d = copy.copy(d) if any([separator in k for k in d.keys()]) else d - for k, v in d.copy().items(): - if separator in k: - top, rem = k.split(separator, 1) - nest = d[top] if isinstance(d.get(top), dict) else {} - nest[rem] = v - d[top] = restore_nesting(nest, separator) - del d[k] - return d - - def dt_to_decimal(utc): """Datetime to Decimal. @@ -141,13 +99,6 @@ def sanitize_timestamp(timestamp): return timeutils.normalize_time(timestamp) -def stringify_timestamps(data): - """Stringify any datetime in given dict.""" - isa_timestamp = lambda v: isinstance(v, datetime.datetime) - return dict((k, v.isoformat() if isa_timestamp(v) else v) - for (k, v) in six.iteritems(data)) - - def dict_to_keyval(value, key_base=None): """Expand a given dict to its corresponding key-value pairs. @@ -172,21 +123,6 @@ def dict_to_keyval(value, key_base=None): yield key_gen, v -def lowercase_keys(mapping): - """Converts the values of the keys in mapping to lowercase.""" - items = mapping.items() - for key, value in items: - del mapping[key] - mapping[key.lower()] = value - - -def lowercase_values(mapping): - """Converts the values in the mapping dict to lowercase.""" - items = mapping.items() - for key, value in items: - mapping[key] = value.lower() - - def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. 
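The hunk below removes uniq(), hash_of_set() and the HashRing helper that backed workload partitioning (its tests were deleted from test_utils.py earlier in this patch). For reference, a condensed copy of the removed ring: each node is hashed replicas times onto a sorted ring of 32-bit md5-derived keys, and a lookup bisects to the first position at or after the key's hash.

    import bisect
    import hashlib
    import struct


    class HashRing(object):
        def __init__(self, nodes, replicas=100):
            self._ring = {}
            self._sorted_keys = []
            for node in nodes:
                for r in range(replicas):
                    hashed_key = self._hash('%s-%s' % (node, r))
                    self._ring[hashed_key] = node
                    self._sorted_keys.append(hashed_key)
            self._sorted_keys.sort()

        @staticmethod
        def _hash(key):
            # First 4 bytes of the md5 digest as a big-endian unsigned int.
            return struct.unpack_from(
                '>I', hashlib.md5(str(key).encode()).digest())[0]

        def get_node(self, key):
            if not self._ring:
                return None
            position = bisect.bisect(self._sorted_keys, self._hash(key))
            if position == len(self._sorted_keys):
                position = 0  # wrap around the ring
            return self._ring[self._sorted_keys[position]]


    ring = HashRing(['agent-1', 'agent-2', 'agent-3'])
    assert ring.get_node('resource-42') in ('agent-1', 'agent-2', 'agent-3')

Adding a node to such a ring reassigns only roughly 1/n of the keys, which is the consistency property the removed test_hash_ring asserted.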
@@ -200,65 +136,3 @@ def update_nested(original_dict, updates): else: dict_to_update[key] = updates[key] return dict_to_update - - -def uniq(dupes, attrs): - """Exclude elements of dupes with a duplicated set of attribute values.""" - key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs]) - keys = [] - deduped = [] - for d in dupes: - if key(d) not in keys: - deduped.append(d) - keys.append(key(d)) - return deduped - - -def hash_of_set(s): - return str(hash(frozenset(s))) - - -class HashRing(object): - - def __init__(self, nodes, replicas=100): - self._ring = dict() - self._sorted_keys = [] - - for node in nodes: - for r in six.moves.range(replicas): - hashed_key = self._hash('%s-%s' % (node, r)) - self._ring[hashed_key] = node - self._sorted_keys.append(hashed_key) - self._sorted_keys.sort() - - @staticmethod - def _hash(key): - return struct.unpack_from('>I', - hashlib.md5(str(key).encode()).digest())[0] - - def _get_position_on_ring(self, key): - hashed_key = self._hash(key) - position = bisect.bisect(self._sorted_keys, hashed_key) - return position if position < len(self._sorted_keys) else 0 - - def get_node(self, key): - if not self._ring: - return None - pos = self._get_position_on_ring(key) - return self._ring[self._sorted_keys[pos]] - - -def kill_listeners(listeners): - # NOTE(gordc): correct usage of oslo.messaging listener is to stop(), - # which stops new messages, and wait(), which processes remaining - # messages and closes connection - for listener in listeners: - listener.stop() - listener.wait() - - -def spawn_thread(target, *args, **kwargs): - t = threading.Thread(target=target, args=args, kwargs=kwargs) - t.daemon = True - t.start() - return t diff --git a/devstack/README.rst b/devstack/README.rst index 0c99a7e9..2499aa67 100644 --- a/devstack/README.rst +++ b/devstack/README.rst @@ -12,11 +12,6 @@ Enabling Ceilometer in DevStack [[local|localrc]] enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer - To use stable branches, make sure devstack is on that branch, and specify - the branch name to enable_plugin, for example:: - - enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer stable/mitaka - There are some options, such as CEILOMETER_BACKEND, defined in ``ceilometer/devstack/settings``, they can be used to configure the installation of Ceilometer. If you don't want to use their default value, diff --git a/devstack/files/rpms/ceilometer b/devstack/files/rpms/ceilometer deleted file mode 100644 index 9c87c401..00000000 --- a/devstack/files/rpms/ceilometer +++ /dev/null @@ -1 +0,0 @@ -selinux-policy-targeted diff --git a/devstack/plugin.sh b/devstack/plugin.sh index 7e508b24..d5f4ff1e 100644 --- a/devstack/plugin.sh +++ b/devstack/plugin.sh @@ -6,38 +6,10 @@ # [[local|localrc]] # enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer # -# By default all ceilometer services are started (see devstack/settings) -# except for the ceilometer-aipmi service. To disable a specific service -# use the disable_service function. -# -# NOTE: Currently, there are two ways to get the IPMI based meters in -# OpenStack. One way is to configure Ironic conductor to report those meters -# for the nodes managed by Ironic and to have Ceilometer notification -# agent to collect them. Ironic by default does NOT enable that reporting -# functionality. 
So in order to do so, users need to set the option of -# conductor.send_sensor_data to true in the ironic.conf configuration file -# for the Ironic conductor service, and also enable the -# ceilometer-anotification service. -# -# The other way is to use Ceilometer ipmi agent only to get the IPMI based -# meters. To make use of the Ceilometer ipmi agent, it must be explicitly -# enabled with the following setting: -# -# enable_service ceilometer-aipmi -# -# To avoid duplicated meters, users need to make sure to set the -# option of conductor.send_sensor_data to false in the ironic.conf -# configuration file if the node on which Ceilometer ipmi agent is running -# is also managed by Ironic. -# # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # -# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. # CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') -# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. -# CEILOMETER_EVENTS: Set to True to enable event collection -# CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -51,13 +23,6 @@ else CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi -# Test if any Ceilometer services are enabled -# is_ceilometer_enabled -function is_ceilometer_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 - return 1 -} - function ceilometer_service_url { echo "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" } @@ -85,20 +50,6 @@ function _ceilometer_install_mongodb { sleep 5 } -# _ceilometer_install_redis() - Install the redis server and python lib. 
-function _ceilometer_install_redis { - if is_ubuntu; then - install_package redis-server - restart_service redis-server - else - # This will fail (correctly) where a redis package is unavailable - install_package redis - restart_service redis - fi - - pip_install_gr redis -} - # Configure mod_wsgi function _ceilometer_config_apache_wsgi { sudo mkdir -p $CEILOMETER_WSGI_DIR @@ -124,15 +75,6 @@ function _ceilometer_config_apache_wsgi { " -i $ceilometer_apache_conf } -# Install required services for coordination -function _ceilometer_prepare_coordination { - if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then - install_package memcached - elif [[ "${CEILOMETER_COORDINATOR_URL%%:*}" == "redis" || "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then - _ceilometer_install_redis - fi -} - # Install required services for storage backends function _ceilometer_prepare_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then @@ -147,21 +89,6 @@ function _ceilometer_prepare_storage_backend { } -# Install the python modules for inspecting nova virt instances -function _ceilometer_prepare_virt_drivers { - # Only install virt drivers if we're running nova compute - if is_service_enabled n-cpu ; then - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - pip_install_gr libvirt-python - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_install_gr oslo.vmware - fi - fi -} - - # Create ceilometer related accounts in Keystone function _ceilometer_create_accounts { if is_service_enabled ceilometer-api; then @@ -174,11 +101,6 @@ function _ceilometer_create_accounts { "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" \ "$(ceilometer_service_url)" - - if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access Swift account stats. - get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME - fi fi } @@ -197,7 +119,7 @@ function _ceilometer_cleanup_apache_wsgi { } function _drop_database { - if is_service_enabled ceilometer-collector ceilometer-api ; then + if is_service_enabled ceilometer-api ; then if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then @@ -215,52 +137,16 @@ function cleanup_ceilometer { sudo rmdir "$CEILOMETER_CONF_DIR" } -# Set configuraiton for cache backend. -# NOTE(cdent): This currently only works for redis. Still working -# out how to express the other backends. -function _ceilometer_configure_cache_backend { - iniset $CEILOMETER_CONF cache backend $CEILOMETER_CACHE_BACKEND - iniset $CEILOMETER_CONF cache backend_argument url:$CEILOMETER_CACHE_URL - iniadd_literal $CEILOMETER_CONF cache backend_argument distributed_lock:True - if [[ "${CEILOMETER_CACHE_BACKEND##*.}" == "redis" ]]; then - iniadd_literal $CEILOMETER_CONF cache backend_argument db:0 - iniadd_literal $CEILOMETER_CONF cache backend_argument redis_expiration_time:600 - fi -} - - # Set configuration for storage backend. function _ceilometer_configure_storage_backend { if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then # es is only supported for events. we will use sql for metering. 
iniset $CEILOMETER_CONF database event_connection es://localhost:9200 - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) ${TOP_DIR}/pkg/elasticsearch.sh start elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer - iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer - elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then - gnocchi_url=$(gnocchi_service_url) - iniset $CEILOMETER_CONF DEFAULT meter_dispatchers gnocchi - # FIXME(sileht): We shouldn't load event_dispatchers if store_event is False - iniset $CEILOMETER_CONF DEFAULT event_dispatchers "" - iniset $CEILOMETER_CONF notification store_events False - # NOTE(gordc): set higher retry in case gnocchi is started after ceilometer on a slow machine - iniset $CEILOMETER_CONF storage max_retries 20 - # NOTE(gordc): set batching to better handle recording on a slow machine - iniset $CEILOMETER_CONF collector batch_size 50 - iniset $CEILOMETER_CONF collector batch_timeout 5 - iniset $CEILOMETER_CONF dispatcher_gnocchi url $gnocchi_url - iniset $CEILOMETER_CONF dispatcher_gnocchi archive_policy ${GNOCCHI_ARCHIVE_POLICY} - if is_service_enabled swift && [[ "$GNOCCHI_STORAGE_BACKEND" = 'swift' ]] ; then - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "True" - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_project "gnocchi_swift" - else - iniset $CEILOMETER_CONF dispatcher_gnocchi filter_service_activity "False" - fi else die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi @@ -272,80 +158,29 @@ function configure_ceilometer { local conffile - iniset_rpc_backend ceilometer $CEILOMETER_CONF - - iniset $CEILOMETER_CONF oslo_messaging_notifications topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - if [[ -n "$CEILOMETER_COORDINATION_URL" ]]; then - iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL - iniset $CEILOMETER_CONF compute workload_partitioning True - iniset $CEILOMETER_CONF notification workload_partitioning True - iniset $CEILOMETER_CONF notification workers $API_WORKERS - fi - - if [[ -n "$CEILOMETER_CACHE_BACKEND" ]]; then - _ceilometer_configure_cache_backend - fi - # Install the policy file and declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # ceilometer.conf settings that have already been made. # Anyway, explicit is better than implicit. - for conffile in policy.json api_paste.ini pipeline.yaml \ - event_definitions.yaml event_pipeline.yaml \ - gnocchi_resources.yaml; do + for conffile in policy.json api_paste.ini; do cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR done - if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then - sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml - fi - if [ "$CEILOMETER_EVENT_ALARM" == "True" ]; then - if ! grep -q '^ *- notifier://?topic=alarm.all$' $CEILOMETER_CONF_DIR/event_pipeline.yaml; then - sed -i '/^ *publishers:$/,+1s|^\( *\)-.*$|\1- notifier://?topic=alarm.all\n&|' $CEILOMETER_CONF_DIR/event_pipeline.yaml - fi - fi - - # The compute and central agents need these credentials in order to - # call out to other services' public APIs. 
- iniset $CEILOMETER_CONF service_credentials auth_type password - iniset $CEILOMETER_CONF service_credentials user_domain_id default - iniset $CEILOMETER_CONF service_credentials project_domain_id default - iniset $CEILOMETER_CONF service_credentials project_name $SERVICE_PROJECT_NAME - iniset $CEILOMETER_CONF service_credentials username ceilometer - iniset $CEILOMETER_CONF service_credentials password $SERVICE_PASSWORD - iniset $CEILOMETER_CONF service_credentials region_name $REGION_NAME - iniset $CEILOMETER_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI - configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR - iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS - # Configure storage - if is_service_enabled ceilometer-collector ceilometer-api; then + if is_service_enabled ceilometer-api; then _ceilometer_configure_storage_backend - iniset $CEILOMETER_CONF collector workers $API_WORKERS - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere - iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" fi if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then iniset $CEILOMETER_CONF api pecan_debug "False" _ceilometer_config_apache_wsgi fi - - if is_service_enabled ceilometer-aipmi; then - # Configure rootwrap for the ipmi agent - configure_rootwrap ceilometer - fi } # init_ceilometer() - Initialize etc. @@ -356,7 +191,7 @@ function init_ceilometer { sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* - if is_service_enabled ceilometer-collector ceilometer-api && is_service_enabled mysql postgresql ; then + if is_service_enabled ceilometer-api && is_service_enabled mysql postgresql ; then if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then recreate_database ceilometer $CEILOMETER_BIN_DIR/ceilometer-dbsync @@ -365,45 +200,17 @@ function init_ceilometer { } # Install Ceilometer. -# The storage and coordination backends are installed here because the -# virtualenv context is active at this point and python drivers need to be -# installed. The context is not active during preinstall (when it would -# otherwise makes sense to do the backend services). 
function install_ceilometer { - if is_service_enabled ceilometer-acentral ceilometer-acompute ceilometer-anotification ; then - _ceilometer_prepare_coordination - fi - - if is_service_enabled ceilometer-collector ceilometer-api; then + if is_service_enabled ceilometer-api; then _ceilometer_prepare_storage_backend fi - if is_service_enabled ceilometer-acompute ; then - _ceilometer_prepare_virt_drivers - fi - - install_ceilometerclient setup_develop $CEILOMETER_DIR sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR } -# install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient { - if use_library_from_git "python-ceilometerclient"; then - git_clone_by_name "python-ceilometerclient" - setup_dev_lib "python-ceilometerclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion - else - pip_install_gr python-ceilometerclient - fi -} - # start_ceilometer() - Start running processes, including screen function start_ceilometer { - run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" - run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" - if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --config-file $CEILOMETER_CONF" elif is_service_enabled ceilometer-api; then @@ -413,19 +220,6 @@ function start_ceilometer { tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log fi - # run the collector after restarting apache as it needs - # operational keystone if using gnocchi - run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" - - # Start the compute agent late to allow time for the collector to - # fully wake up and connect to the message bus. See bug #1355809 - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP - fi - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" - fi - # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." 
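The hunk above ends where the plugin waits for ceilometer-api to come up; the actual shell loop sits outside this hunk, but a rough Python equivalent of such a readiness check (host and port are hypothetical stand-ins for CEILOMETER_SERVICE_HOST and the default CEILOMETER_SERVICE_PORT of 8777 from devstack/settings) looks like:

    import socket
    import time


    def wait_for_api(host, port, timeout=60):
        # Poll until the API port accepts TCP connections or the
        # timeout expires; a sketch only, not devstack's actual loop.
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with socket.create_connection((host, port), timeout=2):
                    return True
            except OSError:
                time.sleep(1)
        return False


    if not wait_for_api('127.0.0.1', 8777):
        raise RuntimeError('ceilometer-api did not start in time')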
@@ -445,11 +239,6 @@ function stop_ceilometer {
             stop_process ceilometer-api
         fi
     fi
-
-    # Kill the ceilometer screen windows
-    for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector; do
-        stop_process $serv
-    done
 }

 # This is the main for plugin.sh
diff --git a/devstack/settings b/devstack/settings
index 06c7c0ce..6cb91a58 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -1,11 +1,3 @@
-# turn on all the ceilometer services by default (except for ipmi pollster)
-# Pollsters
-enable_service ceilometer-acompute ceilometer-acentral
-# Notification Agent
-enable_service ceilometer-anotification
-# Data Collector
-enable_service ceilometer-collector
-# API service
 enable_service ceilometer-api

 # Default directories
@@ -18,38 +10,12 @@ CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}

 # Set up database backend
 CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}

-# Gnocchi default archive_policy for Ceilometer
-GNOCCHI_ARCHIVE_POLICY=${GNOCCHI_ARCHIVE_POLICY:-low}
-
 # Ceilometer connection info.
 CEILOMETER_SERVICE_PROTOCOL=http
 CEILOMETER_SERVICE_HOST=$SERVICE_HOST
 CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
 CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}

-# To enable OSprofiler change value of this variable to "notifications,profiler"
-CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
-CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True}
-
-CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-redis://localhost:6379}
-CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-}
-
-# Cache Options
-# NOTE(cdent): These are incomplete and specific for this testing.
-CEILOMETER_CACHE_BACKEND=${CEILOMETER_CACHE_BACKEND:-dogpile.cache.redis}
-CEILOMETER_CACHE_URL=${CEILOMETER_CACHE_URL:-redis://localhost:6379}
-
-CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,ceilometer
-
-# Set up default directories for client and middleware
-GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
-GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master}
-GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient
-GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware
-
 # Get rid of this before done.
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings
index 6a2dad94..53c93ef6 100644
--- a/devstack/upgrade/settings
+++ b/devstack/upgrade/settings
@@ -1,7 +1,7 @@
 register_project_for_upgrade ceilometer

 devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
-devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest
+devstack_localrc base enable_service ceilometer-api tempest

 devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
-devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api tempest
+devstack_localrc target enable_service ceilometer-api tempest
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh
index ec0e692b..74d7d99a 100755
--- a/devstack/upgrade/shutdown.sh
+++ b/devstack/upgrade/shutdown.sh
@@ -22,6 +22,6 @@ stop_ceilometer

 # ensure everything is stopped

-SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api"
+SERVICES_DOWN="ceilometer-api"
 ensure_services_stopped $SERVICES_DOWN
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index ee4240b0..61fd0d75 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -73,15 +73,7 @@ $CEILOMETER_BIN_DIR/ceilometer-dbsync || die $LINENO "DB sync error"
 # Start Ceilometer
 start_ceilometer

-# Note these are process names, not service names
-# Note(liamji): Disable the test for
-# "ceilometer-polling --polling-namespaces ipmi". In the test environment,
-# the impi is not ready. The ceilometer-polling should fail.
-ensure_services_started "ceilometer-polling --polling-namespaces compute" \
-                        "ceilometer-polling --polling-namespaces central" \
-                        ceilometer-agent-notification \
-                        ceilometer-api \
-                        ceilometer-collector
+ensure_services_started ceilometer-api

 # Save mongodb state (replace with snapshot)
 if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
diff --git a/doc/source/1-agents.png b/doc/source/1-agents.png
deleted file mode 100644
index 3cfa8701255969264476e4c9ccd1f6ee7204ca33..0000000000000000000000000000000000000000
GIT binary patch
[50041 bytes of base85-encoded PNG data omitted]
diff --git a/doc/source/2-2-collection-poll.png b/doc/source/2-2-collection-poll.png
deleted file mode 100644
index 30383811ee3d0ea1990d2de10b44172ce87041b2..0000000000000000000000000000000000000000
GIT binary patch
[32911 bytes of base85-encoded PNG data omitted]
zs*W&;#nn@^MKZLL<1E3@fPfjy!T95a>MN#WZq$EJ4my2rAWFeLPQWJ2TC^~DV%ncw zmP3V@UXZvE(HumxbeQ1=yK%+Y2bJ`?TiqUmcJLYY*sfkiO^=s~Bz-o40T;egoFL12 z$<2MKIp5q5E>XoRa$PjLE5hxrwyfU_z61c!#r+1}>w281j(PXgI+h0Hn6PUaHWg&Z z1yii6KrYW{5FaqK%%NP)l7S*p!-h9@;1NKQ8p}0?wOb(vGn{0x>yyNXKd$NDkdF5< zmKa%`w7!0L9JS>R$IfJ8pnN~!W_i;W01e9F_<6^~1~oSO$sbmlNYvyZW8~3s1Kd%w z3jie`Wt`N_AN$jez_J7!vf)8?`@~s++4%a+zTW<|z^1Wq1zqK0;B7hr#_`A_C^tc9 zfb|_z4#qQ!zP)KZhqz^gx-XWLCRBX3F80h`m zTJXP~hb3hfn1-IY3vUFbqK#afFd*DSq3}DO!!5sSioLY$p&4Q^EUz+K{Ri;7hea2u zK1q&C%KJ8N1GBYbK_V}wrf(lmq|In>a49sY5ro%|jZR$=&K_=arJ0VBJy!~jLuhFG zL;sY=O0;dyL5cR(T61L!xQ_m)XIj5pOg2}ArSZKvkhODPv;&Kc87t+BL%M;yqwi}y zIQO0@7|Q+Dum*Ox9G||i6v*gZNyhZT8qwo7`hAG;nJ|@=3_>3B)8ccI+bFWz^Mu&7 zb8qdrwN#p7fH$Vs6ACC#nEOr6AN1t65<)b*7pzLoVIbL8vMxXg7F!+U$W@ze=+yCy zQUT=2;3Sk|7_p{92*8OdNY4es5Ecodf%WX)(BxF@+K*JMXLpa@Fpb`_x)-ZTr7CJV z9Zx3y3t(2(4(g;fEQkx&j9^hR#S?G&W5Qaaw;a@`H{>T@D^o?dQdQe`e=IphIVhVf zb*#Pwxb(doJj(Sl@z~4NXZz6g0hp8scv+quY)CTAw8Bc$-!~=<-)B*B=twY95iiJ2 zi~YUp7n2C;9;pf&=LQ-Tog1W_S03$DvV?Z$-WQU+U+%Aei_I_`WE&2iU+n?vg#=g&Z{3*|NRVZl zM(-ben}`p1M{DBJM>7(rQF`7EL<#nNLq*tuCch`wTW%L`?G6C;pGp?GErHsoC*8Dm zAT7iTdM=d6u;kK=r2Sf+>Rh zm|UaWF!k(Bouy7MqdLu6==uc_U~~V4$KLKAzQBKjE32(=xnVJpKu=FS7L8aoMpEOgVkWWmM$dNtL^f1gKkaYw zM@eV3%?U@WN$RSoU$#{7x|0w&7FVLDr?s*x2#Lw-4hofY%B!}T*hUSi3+#`)N(d&U z>9|CK>*u|5(ms-Vg*`}LOUk9^38J<9V@YU6fGT5gW!ru;kxaXa@%na#>7=!;^^Qt- z>CbF5Bfm&Ps?Vb>r?;TmF7cdwB|RK}&&MH0cPrbJwj;dL1;48?ocgW{7B!8bj{3%0 z_wja=$#T3(n35v-aOf(tT}h(}(JvhTImj2wGoqU@!FIR$EDofKxsbmlBLI5fz8=<& zC#rCSsw#8V?K9b+*Ho2>dQY>QryTTHg`1jZw<@S9Fxq~w*9s+P! z&t-vQXCwLy{=hSfGd0`MD#rWT_sT%6>39>rlq2%jGMjDF%6;1N-3lCEK_T6t9^dEQ zFX*TxS&O!_rR?tCRjQH8 zPzebpb_lM}Kx*EHLk$F3sPqvfEsa5$^oVuz4VbXly~9Loy85(48u1yXmnf;8ep~y) zak(kZ*~TTDi;jf_;7%cn&+utBsf|i?fqRz(^w=jbV$#(Wv5d5WPM6kZGhyHB|4c=~ zeUJ1qb;b8Ye<_oFN(Liiq=9|m?<&nI zs{3=7$II9ERt|!b_M5cqI&bZgk$2M%cf;bfyq4v}XT&`|K!Fg|-yc*fusAATDj2Wf zi$i5t(s_74E;}qgrW-aq-s$KqeWlP5V#1D)19gC|AYq*7>+>`xve``gYQFl7ho+iq zBFB%5SufR1rpR8YGJ5#c^mszO{@Mk%K5WYl*RSIUu5n53ng&4?Y|4O`-SLE-DDj7WXuchg4jhv$W^-3OD z^*hGR?e|bX0)E-H$r?WTTGLftf~5uV`1n(ktMzcqB+p^7s`^(y4Ix5Ot@u5+k8#O6 zDGL`AHmZPmSpK9Gxxl7Pt?wu1L{M7`N-zlgz%2_ir(~|t zlvlK_45V@1P+{|Vv`7Co*^b-zh1ZPzDSo2pm;Uzt4+kzj!m!Y4`hnI9>&%pmPq)b( zN+AHrrUSBM1jtuxUsZMMxgyRZA@wp%OUaa3lFuVdCYQMT&^hc)$BnUrClje-hBao% zQWf0UDv5e8o@;sY8f))O2dxsD{wO1w&m1O>;QM=yY;7|a73%kGi&}=lWP795Osjog zSU`B1ByX1GH}>#-&HFz!zv_z7KMA06EP0EiWk(6^_H*u^gZ1Bbfe(N2KP2p3YL#pP zx)@kwPN-u`>3GON1v~DtC!A}mF)Z$n|8ReyFymA#AU3WQ9`RbIAq1W06fcz;OpjeJ zGW2L!$&xMTo2EIgoM%Qt)f!2e5hlEpdx zVE|qs3()%!hoIUB!rJjk67!cz9hO&ADGx^=W(i`BAi(-nQJTsZa6$#13+`KR0XXe= z{PsZ~V82L#eGl7TQI1oF&4i~m?z}v;+!@#>$EV&Nh^uHla30*!qqO)5agf~#C%b}E z2g74_Y)=ttvp%%EtLUUNHA;jxCtT8&zf<+h zbSIj2HBCd5ua0-9vZ57KW%z>VC^@U_WNppu-LJ*9Bo=z6h_tPrccmhvUumee;H7CF zySF{(*#i+1DLSyS%Fzy@r1`#W3T1&&-(fU6k6^T-W9d6M&iUc?UC*o8Uz6gqU_!7$ z;E@aa=TuJ40)TkR+m1%QA3W^Br7oYNsq7P!j4?6|(Nr_HbghBL_ah*)Kc ze-DZ_ApSw@eEKfykuA|-DUiQ&n_l)W=dHq>nXp-njX#l@)O}RfwIsxO+*e#j6|M3u z>^7G0j(~6kM2ye#oM~tCd{k00%!nO@wA@URiL6t9_$9w~=Nrp{4Md>bX3D(y^p{Zl z^US_!0z}7-CaTh<)SL^M0&x(r730mfR~h05737}mKbJ9_W9GD|eu7)ZMu|QemXhu2a0^EA}z*a(z-~@&l8VHZu*^ zj`MXIQ6K!ghgGsHewQ=BFbJ-7MNuLI$p65#ydRolSi{#{i%iy5_1I@J z1f)15P)Nt z8Q2;faBBXb8a$e}12uX_<8O$MvoLmrXo0u$d!DmVack6k`q~_g`TZ^pRfdjYX@L_osP!gwx6^*QpO~|ILej; zytVU;i>j#)8e?x+!e8=V?+MG5dz~jxub~UnFskA;7(#(i|uacN-_A-Zd1B zNp+F>F7&%eYqDab8G)UVaN5aB(LiYJg?&Qu5*vYZr|FMgE%&1xTSOKytn&j4p4H&*tv2p4<>c&tQG;90NNdila|$1%fyx6}c?<9f(1*Bw zo2_m>Qx|Z{Ff#~ZK*a06;?qu?@jU>4A%Japu}=kOp<(u7a|iA(<6}JK>u2E~<*eO5 z>9_F<0*JCFV^x&o<$%vyKph05P2j_=fa~~8cBt1aV)vMUSf?dd!-{K{SUy+aBmII5 
zDmQ*1vlD&8ldhY@I#&MU-pZl=Q`HKl!P(7hbzb{C2RD5V2WvN>Lm3_&xuC$JsKnB- zCf%V(^BLmH7ecvxKIUyjB`8q&+p;WcS|e*-YTuOc_0-}x(_x_pio-cD|JH>6*4Scc z2}Sn-0pBg#a6U$f1%`9H8Z&X(uwpS%57acs555osD?5V*6_)Z4K5(c4B-mn%t$0EJ z&~dQ$yLW@KQ_OAS z?k-TY5o~|S6{)v_pnrqUu%aC}p2aG@x~g<}qf~S_R)G6v=>DZQKryC**9G>!o?)Ga z7aBSnle|58tsOcs9eoBBb_kxNjR^4RAv1&YRXm+zaeZ>{;Um+?uEVIUX;200T(itM z{0>Oc-W(dh(;I&uc?Bqs^9>H(N29gZqe|@V1*z=Am)Gl*cKsGImr|66?LI>mhVX&E zTI|3GU^UV=Xh>3I*BHae5tsSpz#Z<^`9rpHokEOC6GYx<)j7V#iNC>!WaHi^TdEmy zkJy2Fh$ImsHCVYzn22np9EV`5!CUQDjnlY5Z9kc}ofQ*li!?i!)VMF_o1;1uTyiqM zLRl5i-D_nwE?~BMf^1QB^egQz6hA>nMrffh@qM%k&$?w$C#c;B(?ZJ>QAm5>Zs~`# ze5MDp$Atf}7GQt@%pX~oQhYB)6+V^Eeqq~wB&R!Kt+EniiK?Ru(o8?#fFz3-t=)JQ zP$xJ|5CDlDVq=9I-x@>Bq_lpf?zgN-QG>OB4Ub^?{i5-Aj^&rSzZF#5-=1iomWL-H zW?&@0l@1jJGEM8&b6dkYfU8k4Ag^Dqi+~w=S@_fLL^cdTj9TR&#B7o!adxsb8g4+~ zD)^@lzZjBtma$nA=(!rm-Vod<1bss2gCynVq%Q7^9@>Fw`zY07nUuSK`%%Y<%Zh1z z0Bths#MU_M6emH=w_Mp$xK)hrmAihcL+Fgi2@U$D8t8y7Nb_QM>TReIVat6T3 ze2!>U*bbm4de}F&{%cZ@hR|G9ZE^QN3HzV+kt7GUB~-@Mu%p51*6^TZ!#-%aJ%T*| zlb~^R7gUqmR7$ao@a)5{J4=8u(Emdo99G@v-&X~N4$6__-Rjo?2*6h0YI8-EG%TPf?z1TW(aNAy)dW`uz*^1{-`1!9+_jP#!v%$>K@kjz}N8?hK-Yt+RQsB!)l8G#L1U!}bo0EM`~S zvjFpik52x&p|&_M^!xgX--8Rs05P)wUnOBZDm>;_IrQ~kJNlm8@37NdPRc{7Ef>%nt4m4$4xl(F$z4&caeb)+IIW2Poy~ z?{y$VV?oz0W=)u0pl&U`Epb`g;$DPm8?HlaFS|ehLyiC)!l5PH{sdt5z*EorLnN^U zL~KJxK9&s9VPH|KMkK`u)?sl`g}|Zs=8Ua1#`bU{etO%X!jRzLY2u%egXlo=dM07g zV{xzLT_p{jThxbY*FGQGzrAC+OHgL^sG9cLARV>JEWbXpU6v*5D7aqw3e>$zyA8rJ zBNei#6F)XakH~O*(zF&LBW-`x;jYd>HhS@6iFU$;)BWXAT4;QT38Fx!=yRNjH`kpz zqyyO_HI*c{z%xuImm6Gel}FVDDr}c06({iy?S{-``B7n4r;in%JW)w{z!%GB7)!2< zjg5V6ZyydMMw@9I9gZ%&{*KKwW&Z8F2{(AF3#$BH*Hgzys&uBNYJoxv&%#yFo1X;c z$?)~`PODNSeKhDEe!3f0HS0O~>bTsuu>7LjtMBgj7*Q|XPc;uy$$$nuSOu~de=je4 z&BR&O|Bd65Ct)yMcOyjW4T9~4Qyojht0}SgX~*2LdA+6Dn1KVQy@U_dZM??yTy0?? 
z@j^vAEQ8DaZA*!jR)NqGpMV$pL*tVK=}SzDJ-m#?AnJOOwz`z|ssbCI;ZF60u#4{$ z0N;J8tC+1GiR6jzy7+HFP)6~~sluw`hDFcSbvfr&YN#FZNeC26(8?8P1xumho6evl zFsa8Xqi&ajYy;KLo&^PxXm#^rH^UFf&j1{ckzHE_OD*_73p4CSe^1C3HviDb0`#ta ztE>!n1&M7M1MMo8>3D%%ynfo-*@dwB#4|?f3M)!E9 zylZRCHh%cN$e_(opRw%%Yf&r!e@|i*2Hm6qSasV$tuuG70#7XV8?j4UV2?w0O0!wQx-pS~x{v8TtS9l~gI&V9i!QbjO8S5}%15{!8H9 z46BL5xuXu+bmDP#SgykE*^Eve+5R~?g(_|ZSGCDzps*!3yDhP^vVnArrhljF}an&9tYa8^EZjDsJWo^ z!+*b|v%Elr?X0e)c!)gy^tP@q?+SD?`IMMAK_!V=|KckTrBALzzSv>XzCxd3A+0c$ z6m%zb{4h1}p>)2&FW^wki-5cmVr13n|@4U4TxeC z?Qm>OEu^(RI$9+C0_XZvK91(U8b!GQ#bGHSS;)aH{ zcGi_!&AWQW#zim9P7e+ZU9U6N)XaEcb~?ptF`=#SXKvYcRO!mf%3L(^$kNTtO>1Xs za&mHOr2u79b)K`eo@zdMBEO)(q9<7#7&L0u+wS%2U~v8j8g5ol4+ONF;ZRFvh%c1kSxDWzmoE-z)0{51pKB-y5j}*4aT-R6GEetF1h^Y|@9Kyl`FX zsHED6f2dGjxKI|;6Gs-g^p)Qh zJ7+O1n@OMcsx#K>-8Ys&*7nhvtuo~CT7axNq&#_d_uT+uQ*VCzt~$we%`3y&u|~ky z@5Lx$@|f5-@}0nW?B87t6PJ{6CXGHNulIyw9y&nu3u<)n?lTZ8q^7VyFF<;X8*StC%DP(FX(V*8ImQGUUBH z^k2de8P`K*UG$%~gHt?5qyZVf{y5zIu=NQ&l?*?mT|ydH$iGGR(s?-e7EhymmfDum zNp&I4I2gPX)BR~4FAF4-V~h;|O8Y|Dq#VJ+C+jOND-;_1Px>io$*~uz++*VFUKu=Z z8|q|>$*Y_eOP!#Cr}uj-_rd%0E?80Ow0n@D3)cO$RnuZW>F%`$a>6y7f0DBb1bB(0 zNt8!xV7F{YiU1Dr$&^&-;!b zU^F>3BE~91;zYDoJ-W4L8!g@=D z!Ww*4Og)-L_S^m7E^kywKdXX+M>glx??1>@fM)a=yH$i~_@tv->pV?ZC}ht?_|8Ns zanyHhR}((vxTvdCd9+CvPQ7wPjS=XONs&{SoUzpQu>^+*1coHuPE_fZE}h-p5WQq$ z_a7@=tYbk87;A`9!z3$bj@xa3?pW{46(Q?0N*+l!Dav_vIJ4?+Bi$rInO0fvr0*)A zS~i9eeve;iocWIi8oBi-C&e;y_+`CK!pOV()Ezc|gM-mNvsp0j(UoYvD7AnlI>Ghh`UoG~3Xv>Ca+7PD8VZq;#^#2`iA*8l<$a>mn zSwVg{(FtSoH+4*EXz&Bci{wMWuG-&^|v4UE78`7a%N)QUVN zPX7}X60MY3!5uq!?2kedk8s*<+c%*EvvTFxn*XtxOxLr$6jf-pLsmD(Fc~}R;(JO( zuR)GCio=KCIa^ubTR0IQl~~?cfsVSJDDo7dkX@N9YDZF(suLH@tu+zuNnwCKrZvRJ zw8H;2rC#aN=C3qiJl;`T12m~ue5U@cc!=!86*qkUd&mhlGwPBi+m!=3)RdZ(@8M>< ztATsr<#k5~ZyUmoY(}%llii{=D*wHiq4b7-M!}j*5lKf5zPu88pCeZxjeeFrp#n_n zlHGQc{1Z&+k%o!qD|p>*nQ05qQ|KXSUW1zIKYnqgce^HGf^K_c7 z+$r#Q9q@QQ{cTz7M95aX0J*(11p{3TbbOU+%0Fl3&&h|Xy;Ws>nWLGs<~ zn6VO%?>NK#?onE}>6vOonfyaXh<*OkY+i?6!20>5xvf?t3S2Bfh(0@0X3 z#{)Ln^bI(L&dyec&6|R6kEBn=lr6C*xXrA4aGPu+s}}pF!)0s9IBXH}_|Arde`Dk6 z|6V)ftIU$<+rXMPIO;4%>b&mG;at#tm(y5I?4G?D1afpPz*J4ScmT-qYibNwwO)gq%Xk7@ z{sLeoc|Ci3`(ywK7}H^3XT;VP0&XM+=_0H~2rP$?DZIg;1Z|~=h=@=*RuO18Fuy6` zc6ViU6}!`~4%*1(^l##Zvu^C`pasGG&woQc5oB9fY zRi{U@G}h^+T8KXxwY8EgzMG9;6GliuagEZ4pLd7N!Q+kG2epap46gtQ?m0~ys+>!( zP6d^tYmYc*EWdF+{@sfq04iu?4$PkHOemVvxV;$|8nR(vI8iz6@3S^pD+*TD11KUo z;q*cNU0Bsps*ibl1oM?ETOI2D_pbBmrM-B_*QAKCz$X9cYkc*jWvbrCVs~Y*cXG1G z+3YJoK`j{wnMC;EpKo#{-n%PZ#g{+P7MZHN?*tvEG)SQP7cGBAtIH(NDBrpBz;ViG z^MT;Kw`(K4IoDmQlLOzq0G#i~)U>P*ogXP^<<%7w6beo1B`z^bXl;_%ey**p4LlQU zI-V28NYNsswu8^Sf3X4U7v1Y~FL3-A**`gHypuikfkx&v>1@U`(5e!8RJ)n1IVU@e zOoST-oXZdvQl^SSzcpwfAES}i!5=^sKn?NUf2&_uJzP=2Mkw97HRRa(<^0an(l$62DqKL^3)fL- z-dtH*;~lAaxiKKLH7=6Vq9AjhqlY{b|DKRt%^D^(%gz}_djHSVIucc3!3WJW$V3`` z<-u1zKDBaBW*By-hrVk z_6bse`DxiV3HGq}BSOjk#O%RF39NeO6p>Q}w-I?~|}~&%hVKhxPmMDQ@!W&-C4i@iH(UVxGm^ zdqs=6S^Qaw8lkwJU=kaK#SQ}zmY-O z$&m$o*gxZJP~Wdo(F3M2Od@YJgSpXC=XyamK?>g`yfq)KZll5wU|A5jGpmP8)t9PL z(Mplwa7fhj42V_0njJu%ODQtuqwQAnpueDDZ)4JMN6W>`$s5*!&wG>8;yAp+j3T}5 z<`VMAZ*PD3L(i+j+5UUutETfR3@m(nd?jA^#rI=&2a~^TT-$=wLV7lW{iu9awp-~< z-Ga|*-95+e1B)2+DF`(0@q}YefEhJbc2X00ws}|~yB%eE^m>*XWnS*pc8G}3-QSoj zF#UMj1tY!RvWh~pT#;7i@_oDahF$jUiep&F(tbU?EWFa1eFyrH-o*2p{KqS|>gKE5 z3macO4mc-QP*lXn;7m-Cc$npMm9Mz#H3%#xg|&G4U3?=vXz6@)pttx%N%u6 z%2Uw9Yau3H*gE1&;O#G+3+MMb4+nl|&SjJz??+lAV46-AsQUWJp-WC-#HnXud_eqV z8T^iBdy6>s0t|*KE{?$WNUB>j34+Mqp7jk(5SH6P-+B)%v3OZAt3TxgFsWS6-@Et| zggk@GMx-U8Ce<~g*cDplG)g%rKyOqpr5=r=-K}Q7-8v=!-;To3+pkl!XMKJyM-%7ZkSd*b)m 
z>dTuOKTr*_Uo2$-0Vp0;7VBeFSDA-?;rT^JK_Q6qnpylg0r6r^YdJcjY);#gDhyTg zme~1$0T)gtnwI69y|u>uj7vbvTp;K^JDQ=A{e_nHSc$K)8!Ool@8`I_A#012W~h6& z;i~`nSm5RBl`R-)7Ix>(C=+ofm+_~TKy~Dwio^Qgzv4UXSgc?jSeEQ;Q3Zm$!mpQR zVE2rl&C2uBaUsG5O_PqQIh3pm(U<?WY`l3{ zBcu$&Knp*ZZ4f!fnE~?p`xzGy$^y$B>z1*fPAp3APgrEpYt4Wik6cxa`-BTYRz5%yajud0Rf zKTX=J#5;-}ZRBm$qQ$MAtqA)4H1_fF=}H#YoIWdO*9CTP(%7CU&Gm57@5yw&0)K!T zX=)+wA#iO&#xXI?V238HavXp7%8CvA?tt~p!;ZF7syh5Su zMb5gOo}Q}Xz3~e=E|V_HZ<-m6vg^G!oeJ%JME$3tljK!ZLDQqb2!6JH-lbZv%u1EC zuC6Ni>KO~Yz~U@3`t#D>-p!kUkf8(5qceM~1m$Drc4mS!7Y5{#+lG`WdgFVYU~pjl4%nnB z7=Gy0vTx#z!nCK78-?0;7zhPaH0`ntCUppy-`-Ni`ZRR#tIp2*34kiE>gGr!6- zeIPGW74|pp9eK$Iw?`NOq}g?hxZm@j%WUQQrbr*rDJd-yRzgw|LPs2L{uJm0Ss*(# zNSn<$Ezr#yJ_O+pO=~(XDP+;~6UzI|J@e5h zxqLCgAYPMekp{%>#;@z`#I%oKBHJIk$=Ta@K9~)b!D!=nB0RH#*x9&}Sni=}5}v5$?9BZk^rgcMRY#0?M- z22+r6`z=%y#=oc4l%OpwdX3dP^;x`(pEn?o+F_rfb#U5$mf%h*zVmX;mEFJVT-8J9 zO%An5TU$OG)$K#vf=>Wxtz`$OCg)~q(H$So>hf~;XJ{Niyra0&g%uS#0Zr#{2-07~ zZ<3F)a_5)f=_3MOns|opFTjzSa9Psw?7@~gd~nHv%)ei7@Egr^;J@CXc`n9tr&l4P zxR@Ux@FQn2F6o1r50r*Q16qjXvhLnYXK7K|GuU=nKLL32Em^A~mT1|!oU$@u0|Nu| z#`lyY2B$C}*u`I97>Cl6KDo`bhx#X-?hpvKJhX58(mc2P=Zx9|Xxp?!d&K*Fd#|j6 zx76x@W<#kYoSVuHRs#a6UJbh$10n21drjSq{ODdtt3Bg9R5@sL>F;kx}j^_j|fneD5YH zTRf*7!WZ96S@=E41yro0!%*~ZwDEty(?77r2JRE^(sF@4LEG4v8G>*SJ49|++H0V9 ze7pZKW|!1cIxY3=v)cn%?m%vqmM}Q(UD51cQOTy_eZ|g+YrVj@{1Xv=;19D5;HrY&dGYJaEPH!+Fp>rsxVELv5C+7d)$L$3^%o5T~!5A+xRocWOzz-Chg|2d`g zLOb|(^Hl3$`A}i3ajgw`&+!ATj6WI;Yy9`N4T=Ew%O*`a-oiiG{)tFI@%?&}@`#$_ z>G;$C-kd@~F(l`C&TyrJBKqHufV~pW3cNIwzG8Ou--szF^Z=V}w{CS(<^8{Ru24|e zf=wptlg|Pao&LSygTl=3V_fjrVcP%a6(gr++3(1E{Ld>6Z`D0{vdsNIg`jUzev<5T zg5!TkS%2B&&Ye%^1<&#Sd%p|C+NaH{S9r9*BSPMp0p5c9?%n6W%+ddT{a*e$n{VvsH7ard%Nn%6W7{FZZ57!w(^RKl@QKUGYw!93W|!So*}_YPGVo<;@Y3w zN*56owFLx|mD8J>|K3;fv*)b9iJyy!Nl7tCBnkk2j(>+Qzu<+Lq)K^dP*PIbym>SG z=EV0;@$n2`NudfYr`p=C?Uny%=T5?c=o%Ln*X^p)W;}pVkZ@noLYd|OzrI_QEEDTD z@=u1y=Ixj`Q@>q0^>Y*wLhqHy@CkzRq8YnbX2!Lb|6W*g=VQ@OorJRSqhs0=L=GBeG;^DhgHe5azYoBtKby6q{oI9w^O%u|*w zVx@O2`k(y4VgRR^(GD{i?G274{S#sbSf1khJLrWtm{e(S2~B9dba8gJ@BXT%|PMzQ91=0EMhYxrO`E%4}1TH4wj>u;$}$Avb?>WyLvl^oE?KYs#OrMUhAbgz$& ze9M3Qh;R2#7T5C-|L?`8Xd%fS{!`fUHyrJMEOh1p81`Rlg@~KI0KA`i-x;Id%P7qI zx#ORh7heNC{XwXuN^v3DE&Nsgyuq9bGzQVM|h0Kp;z7RkfHS3?yjz`fX(fac9v+ig~i3h_RpU`@3b*Fo=&EMZgLB} zhe!EWXQ!sQc{(7*cod1HFUGt-Qk3Ofqo-Gnx+g4L4*KK)!`wVP-DJF{luPX-qo{*X zWpj^Hoe7HV4d??r!ZOJCR_VJh!yVMM{UE=4mQ;aKAM8XK#m5!aYSUImI0_3H+&W>P%QY+_=26S^$m0p!a<`uh3?X#aY1b8|NL z)A5`(=L8);_Fm;TuH`$NYYlyA3dT?aNG&=#x~$yXyWucd6NL1h-miUz+pm^v8E@y4 zOVZQtbf-!ig5_W555h*vtZiqTK1Gdj3)Ax%Sep3kt&N@-{_BedSH-s>V2%-ZXdD9- z`hV6&%ND?n*)~WRvkO1MPX$nlr>GU_m&pU)2GOB+Sl4pnv433ID%?ZDBTt*GZ%t0a z7i8nS=Utm9L%v<3qf?9m9gRo$k=9W9Q%y~4Yj)ubLXS;Qx(=0wt*xy@&@K@%ZwXGb zwX(Jjx)};zc=h5%PvqbI=|~3laP=%H)k%)TL=8vsyi{OuTKN2kJA!`2F7CB zYGSc%;TO){lq>(igcIQ7iy2-W+?cHG7Ba5$e8C^546L~AU4NozL*A(xP^@Gr?@+UtObeodZavs=?q=Ct-t*fhSb3ssj7@m_=JcDjxb+=auYIbhMi9BwO zWK~F9eNM!C*IVsoK%UMmAfO)g<5lN%I(mBBi7Mw4jah7vI-fp$$_SWhDHcS9-KcrP z)SwzR)=Gz(F7#pE6+rvr#$2-8tk7l_brRm*( z;z)Aytf1kqa0>iu_kv8G+ox3I9zT9xT3UL?{F1|>OxW)>;7w(GQpuc1Uk$?wJ0Y|` zHe8gA>sI_uTw-EAAedGi;fIC~=`3}Vc8%0U7C~d^MyXQY!O_5g7Ty)uRs_7)EN!N5 z#--YAZ%se{U*;P*4<0=daK&t6qFR%kojq#oWETqcqv*p8VK(l+(jg%s2H(vZ15!m% z!?q5&w8i$mV45nas)~?@$@M_%j9?}HLnbe4&_75^??1~1nsh9iUdH7?K?Q9W>7Fp6 z2=In0zxj=-HWRgD*4!ZA^HUA71;@=xL8Co((7ZA%FJJzwWA!xXq9Bz7ISq4`u9n9r z$-_FO05H)}z~3%bW4&^14|FYvLC6%`vVnF=$_@KP5V*nJzDGY&IYBX^Y+{lM7-AOu zDhLoo3dOk#EO4z2 z_o2`npbcdqYCiqaC)L%}i^HZ5l*q((9tn?->aAhgcPUrQ1I^b2H>PzQPDc&rf8RlY z0lEb;x#sTPmo&{0wqV9|b*VJ`Gw6S`s->f&^Dtoa&z*Hrr#43$;>ynCGtW04KUiZ@ 
zsc)EFr>F14Zas?{?GbQ?=QxvXRN&v5)ZyRq6(t?74G?@Su(u8VwSPybArtB8=)|Ua zuW+@tvO)4Sg0z5-UJnqCLo}6M0%6ni%L->z)K57 zn)D6=p%+0wdWT3T(n}~p=n(RqN&NZVZ>?{wyY9Vz-uVldnKS#Gv-h)~U1nfOus28= z1*QiFI1a4P3}nWR&TZaB{4OpldjVk$0ASUZ zFEc?&;6agb7hi)q=>utwkhA0Cx0!qhfW{A26!zx9wnyw-4|nI@=PfXw2lI@Xm?&|i=O1Ivs}>%ikI&4^bj9YD-e|dk-C3K0yvWJt zb6>}Z6tR+}>X=dZHke$c11uRp6et{~lpDC1n`Z-`wYiX< zaN-zpBF|-Q@{ysj@kyT#pf+$aps8FNAvl6{adFw3{KUNh3Nv?vg*BmD4}bfMA1f|a zae>HUoz<7&T- z>OTN*L$jUDM;<=95bLcSrr+macYv?uVi$t+vM!7CC}u5skh+YDB98>-4mMapSyQvT z@MRZJ0#z-o9B@O3B^G*)-br8@5kslAwNOXx4s@B)nryVPyjc;vz+7Sbn zEBW<0ez-%&tA@T08YD^yy0&^)4jSJ3#15?B+VX|Ik$WoG6wY10pe^rG+kd^)KVcp*ITO z7x%-X{!qf(93M`=E=m4}3IftscMWXy^JmWz_x4lXjgiPQ>Cw91+e@f25K#?Ju%W>VFyNMV3z>qU%YU^z_WUvTYXbzz)9{5 zG?bpibwI8t-U!8MfI5MM_pZX7BShk>rw5(*1WvflM5yyRF1^5^g;Ui_z}?ZLeeiw= z`FTkFfwT68;kqx6FIcY9+$2u;?k{SH&(j{61x=;x2GZG@o3RG!{kG*1ry$#bouP$x z_5r286foZ-M`k|`t67SIk|cDFRsfn_JBTHt=Oquo4D7NWOcPGpAV%jU4w6vKst1ck`T7v71}1 z0*g;&H!E+cYvXo)6!VT&8W!DHNLI~ua~GL+k(!!rq%iNy)gX; z|DA3oJojjxhRxHQI&}(E6Xc++-#Z(DeqB_%^n0Ue@AXeJRr^*ly@Bao zX|<7&Tbh-q{5*wJtKePZ7Z?+yERZmooWhQ{!w6ub1E8}aKmW;UxK6~4(U~*&RV78y z&2Z3`^JAJAo>=&|wvqQf)5+7wS8o*8&Cd!JkL({jDxyE;f#~H(y50ci_Vm5wD(F@z9o$u{qm2uF?Z*{EB2KxxN zY&^}HlD^RHU#qfP<1r-aX%O_-*eGVXf1HIW7kf?LGZuRjK zPrjY3me!9cXo)I!HxuvON=auoAMK-$i_Q!k(xDI>e6}iUTOK}bo9RC8**G5mC8b?% zz*RQid<5JA#%AT=T@vwXg4ZKD1nts{MDRt4g@VSN;dZlf5mj3hxI-xfr^lgthx4BC zgXSsa(1^7kujHeg5)RAs4Z2sBgHlpZU5J3*{wfi}*_jgvQIA8B)UJf|$xVjm4m)pt z?F)4n%&mdDO~GF;@9p0d)us#PP-`)-ElWzIJ3D+(H+|DI=+xtut!;*|(H$ z;$<5HgNta?a>X;Hq&Bexu>CPZHBN0t_+~{<`uloJ_w?U^Sa&8nci@y_(#x}gki;CG zFO)Gx3Xh##W*ex$2Ts&4%#NlPyIA73da?#A4NS&O?trDXQ$QF4$YJARebmB*+^sr1 z{AidZRH+5o;!$*sM3Ld7Rx29i+gg)(Ek&8>9=Px;~`i- z=3EU%UescPf5*08%%@tfSJ5{XBwc*PG^&NJ9^)7yZBb_|Ire^k^Gs?L`>;PTd;c)t8b&sFrS$*W+xx>|uxhw9NnLXE0 zNw%hEfMaKnyu4yM$viwAdKW!zfv9YJe{6et%{hwb&LkdQttw9H%&iNZe)3e!{pr_N zVWDIiacz#(3GF>CFy1HqDc(t+8Eq;|xni(8NPWUJb%GJ9S5>e{WY zl1wo}LmwS~iCPjY7=JbyG{AcG*_j!kl{J&$^_d~jcp#0cy2h}Y!@^}P$HN2g<|3qa zKEk~nQ#N<*7*%fC1m9Xh0|+u#lh}*ByIK=u)@zB8?A+%^AcQ9F^h0oc11SqEktJVc z(b3=CYjdJucDs9i>-V~AF`K=t?Z8BX5KvpL&s_Z3@SbfD2ZB(q&R!^vrjI!jZ4U(0)m(9gKj45i!6gnuRquz3)(Vjy&dX4k#z6(Nhw(}CSMjOjI zmk7W8>e~P|=DXAa{{U1KKG*l_vx`&R6+dRoVaP=4Id@Mi&Vi0?uQt*?o&B}*?oe+x z)_rVVD6J-j9=J$007>G*Utb#5j;7W!i8E1$_))m^lma2cqoOiI-L`H6Ur6cyUWVuk z(B~&o&kjWRjk`BNi%Ub3C3IlZo!4a}BQu-8t#!R_ovP+>TKcw`^H29URoCnIqYZgS zYvQvm$+xyY*LoT-85+iWUsU^gU$2za^mQ4jS5rsh9|=qx{f^!;?jIgDTiNp9JRDj= z?DJpQzSs)Dto&!_x#B^WYG6Ds9L9QM$9|m>HJ<$b(N-u9BnFM{6R`%QX-%bS-E0B> zZ!%2CZyVaTEsDsKtcl%}H!wg7=lfqkhJLvtD5%emUjr5&Syq+^hvPMWSoE&0CMX6A3VskW^8 zeb)oLBMEu#ZiuTDw|0S99N;$T>6y9b+N0^?1P5}OcxP9}IAc|-JjocgHj@AHT97-G zBL*Ol(ww9wgqTk=E@-PYv+k?n;hGjW8p7HQr^1PL#4%(j?`C0Dkh~qrnyNC$U}-)j z(Ey1boDY5105)R>KVy-_wHz%kV8NVam63{JdpcVr@s`!xe&wIl8o1}7VsEW-kxAyM667T0U#ag zo@0mzgY1y6>+(7 z7Y7AtoVrZwHDKW!UiX=2?d?o$t+C?BhMECW-dHN?`cqxFc6wq4X7Id0aJ5F*Vjd3Q zB#g=Pi85w5NDbrDsIFq*_Zs>Guws@~{ih0@^4fD4u0j0go!<>C!TjM!EP%HgrcV_V ze!#wF7sjn&j(a|}vO;4OJ35q_u79em#vOF*0VdF=mh%k!4hO`EpVxR^^duEie;apg z4n)||RH0v@pbz20yqC}EP~L48^)=d@8R^JBsoK++nsO{>+t~u+Xg=1gX(eO%;1}Nd z@F^fjrY7$07>z)u@!b7!cbs1~!M&s;G^J4g=vZJQ6*~{Ndx1%gDjhO3=j6DHW@qIp zB`xOtY84|Y>+2vE?i9w)#weT+J24cmtv>7~?9LF8-L%QRwTip}K_D2xUu2vXX~X4; z?73M3zCn*egU~8~?mZxFUe&~Ka48theUcV{^kJ2U|Dpcw1)zKRz-6cbeJ|fH^AdK+ z#2EZ^pkQciQ(YhC=ABwd03ug?9Ta7>R{&1hD9TYZ$P$zHlDih#vOR9X)DUE$ms{Ir z0A7D>O9iBE*Ua1@>5C*JzQ#Tf{46ad$1CKf<2nq+yaWA@0GIN%LhbpBVoVyf54Nqw za{gH`1^l`2KGVybiY>KWbjoX<@akteNm5w;jrpp*V8g*SZkbSs3>siikS~vl#mQ+F zN`^7%QMM5 zF+kCC^Ffk5160yXz(b=?E$^AKk~y$<-#>!>k` zhNhGR 
zk<>DU2UJ!HM9dB49iHh0+z3#hmsW_j*z!^$YfxU>J3GYK1U%yq2!{|=*R_!z^6LZr z8)(X3fVHfQetj9b0a(q@$gU_LGE$ST9473`0l00r3YfU-J_*gw<1!htpK>6SFMP6j zsV)CBVA0B2^Zn-AlT6+3j#Id9;9_HALCiM2x$jvH*U(rD7!04TgLAcEYx_{4*`X<* zDh86_hxKHz#DrbzoYI-MEJJut0)gP!%;*G>N;$SSVS)`ncO&ARJ!H9-gv=wC~5dhiVJFYV&GHE+B3W1AiWMmPlLZ!!`= z#=7qvjBeNaxg5rx*LYt+yZSh7Yy#vJ2LF-I_Z2ZV+fNf3-*fl4^cBRZ8G+K5WJCSFH^y>K z9v(O`Y!3`N(w#rrizxXROm`_2q$?KMWOL=N!eE!|_6u&`JOcB!x>n`GNr!!z2JF_F z!9xf);crw~iCd|J7`h#2*A$cDc+m1xFs7+sM_+pj9y5>x#i_1~&*eQhy0qAPvntCQ z==0L$MZy#C2E`zuquOQ&#n?I5jGlxyfzTb+ZRvw70B^gGC;4m$A9b9jOtl!QWZPfP z6Jx{_T4Lb$Ipu9yT4G|5>6tDrEM4p_%a7>ay-VY=PX_hTbyle2bn7la7~fQuC94!+(P=%0U|AeQ%$N<61FR*+{J9prY} z^s@AerJGR37$(8sdBHC?0`I7vpswcF43Vf6M{F6ULrSuA)FM^@x(4eT_zVu#VKA-o)g}lpwdA0PWI0g^LIaxd@+dsA3+xfYjGh#%CRHAw&ogs z7EMFU+y2RDfE18~cw1|0@7^XhA|ynQNqD2tX|FM<&CEpKllXixP_~A>_!AO`M@O47 z?SEB*t`Jhfv>qeMbm9h75heF;NV*m8EEz1em*En~N7R)%2`CA73AD;z8U8`MHsoT* zMVR*{t>rW**?`{FP@&-;$cis;R>3UI1iKaGYz;1+1Q_h$J3C|)*?&rvhQKSs$!|M%ll>o%p`}&UyF@uD@Y&iwfv*GW3?75=M4yB3CwSzq5DVX<> zw;)4V@D^)%-MW0FP|8J4F0d(xvF*lD7&QyX*MsiBhKJz4yc~mO(ZkIKMi2wJphmL3y#jrKN_%JB>2TL+18TvX3v&o8Jy8K8&6A?_q24tk`S;lXs4)BB1Z z&7?)L0Pm^8EV!OCxVVT8ka`C8K|0EtoH`3ZOg_OXiNBuv&#%J7U;D}t2Plk{fiiJ3 zP|)g0Oy3*+fyH8H-UvhytG|#%g3M@Ee!lvH2XCQFaBOULP7cS-_7>q--6m@ zHlVOky3B&MK0p-cj4*WO0bmkP3RPRX<2CeG7*Wz|59iECeF{rKkl=YWH98=)C(l4t z!8Kw+MV?4!*BZPdUJocINT@pg{`Fw_XOT20-p>|727aqAd7%3sE~rh znLm(m1ni0J*N%tNncjns?^$KecC-`oRdr*~(i+vPYH~T89gNafVj1u2?oI@h@4gFo z6zy+%ditnib&sK|Zi2)k5l>yog7(!l{Zw>yyNtw$0s#_w494)=wHn0XM+PFV8>A%2 z1ygETM*pI{vA`AvslLxSo*Mx&Fb-&L92{CDGvaBFO%K0+(>`oglMLL@O@EZVXT2l< zNp2rG|KYSIIjl|ydh8dHaB8lTiB=v>;nTO}WtiGu6SEu^AECk0K&jqsdu0GL(~|`` zr4JKV+WUzlWCynE(=sk1OanthvCUS9$Vf=G<@D3ZZ#)&Z+n0q$Ut* zV80vLf_AeeO@VYrno$v211q&!s%J>fws23uZC?2VSPKxTuC?Na@ zRPuEJek`iF#v=T1Ggy6N!t@UykV*3bsRyS(&ed^Q193B;v8-6`2 za*QtbUPDFcb?}9_T(k1_&(Lf|lahRcZYOd>kM@OH2|$s>x_UEGAwi5YGx@X~*vpF5 zpgf9sT@mM(XF1!iLsPAbH(@vl`y>m}cEcs+o1laKM2>Iy>a?mQd--|3&m8%LAX4{^ zo;lP%ArIPfrHQdP(L6Y1*z&ChxGA*FA>xi4r)MuuNmapuM(wGgQQ~s zmC3h*`MS){~|g6Y#}Mz2{lb9wawiA!{N-KntBK4 zi-I~WilU?>kT(8;QoUuCe^(G10;XA{>ijYxDkDvQO#}{$?7w(<$a*MVz~)Q&rP)<4 zf*TVt@qhIinysqpjpjAQGqCzM0#`J?zW%7ir7l53%%Q^^4R_((t5rCK`13G~jldPW zqa|2wpjyD{ttt`zL{%(^%K+~Xsgn^(n zvTa8B)wKa)eFx@N=nkexJoNPFze7!L&{1%WRLBEAMndU%;!zh0jFI%?i}OIiNQW$> zq{Kr{rbq6`QX@Kcg@9eH@8KijAJD%4t<1pINe2f6CB(y<73dP_#{$UBl8*nAh4}w*&;X;; z-QdeuSXEtJuhG>7&{_d?l=VAQ-vUYis{bMuq{WSuaQ-MZv+1h(?3rHnJ#}?;c5!h+ z#rSp|8I=j>(-*NPUB>VbBe2)s5_ZdakH@NfGg60nE=1G+#iR~N1y9^(;|4FMf;V45 z`7^VkVpxRlwMntx=p{_PX2=#c%jsgz8guZt1(mS9pZ!UZ#a!B7S7-gma7yAZgYg52i???F?pZtb$GrKZz+D&E>H=$6WBzPQoe??rxql>&N1ca=D>%I43T9caDQCd{ zO1A{+><@X9j~3O`#Q!gM{Y62*QAx-D9e60)XS@eZDbGz!?-UUeBQRK^;%g%#v+p0y z9cg}74}yfJpcyRw=N~lcPOsUSqyTMdF}f#95bA1b_x}SI` zS_EZ@TfDX$E)`VY1Y(bDeia={nmUZ&nKtxvHzGRJJfJpNt~S7N{WN;HZ`d}-GsX=TJk zl+P(OK!o|W)eDO{bDD(huBg1ZXvK2#`{MefrEVC^>ca!A~Kl0t5uIx`a74}ZtJj^)x#_?nCnlbsJPWq_B-DoXb*{ZV2 z58QtJnxdG>XgMzC{Iz!N3irZGvm@5!zk8i$LruzwQ6#KReqrEuuSpT#(eD?e#rweZ zef`ulE;P#nh)~HI&+5R%+Rh1`kpgr`g7tF`$Gl5toIG^}dc?ev@~j8+x}9fL(o`Hs zj@>)9FMDDpdTVetaqxR?KPC!C+76d7j!`Hev_oLbjPsJ&oHv$hj#;ZydxvQnfdpKoGfwpi4bTSlt~`wJ{UHouZp+-_PBt zy1!ISk6UkSDbim}&)&}!cG5W+K$*FqT;z_BA95$5H>@Cd>@0ViP!_XsdvqHA6ysjs zw;(-^Un5-zI~5S(0AXh$c3Jq_T-^4#au1P&G&;NV*yi|7mwUfHr`zfD`^ZccTW*_& zrXba4vw@aau3JEVd)LX&SNF5P0TJ31cv@gw+!!zr?HS9qJSglk>*Idt*1Ni}0g~_iA*@6bW6@glf!@w6%6t z(-|r3(WYxM!TD`H^&|UaL5ygRC)QSM{oxB?n;H{MiCr9Q2NFOuii+`IN9NvH~19v_A;z;??gJfEM7S zAwkxf5p8CbRPrJ{bVM^_=A10(em zFpe1$Do|w($8$rg4Bg6O7@>jORz@2a1F_StpA0f<7=+K_<)Zr4M0Y(WYOtQO(B|#P 
zOtrf~MtNhQL*DnngW_{q{^nh)N+A{SL`f8`QR4Pr%@Ot)?8YRuNd^ns>NTG* z%z$$sLVhdOieGwOXtFfFTsu3B!8k$aL&rv+5XNg14#5EaO(!p~lxY2oLNUX>R3p%o zA=t+Yw8UtBs2nc3bFdKOs;+5n2bp*7lEYjlb}05VtBhjxS2=9UAf(O6!C6?>d#U_p5$OCo_M zq4KUKLuoF2)nOr^(FnWuR-v*pp}c8jx(UD6^>&Sp!fu5>xEbgW(87+@qR{u!yQfI& zQ&6Zw!7X*%%akrN=DvsRGkAiRfYC80FuB+{3p9%FHD53|5zbX%s6I#C)Z~;y zfc$}widrVnj??HnqrY@F&D`_)&(ZqF8t58m){Tsw{S??01ZE3Hd#97oqjL^RfRMf2 z#z$kdpWrE#Yw)tSMl8LyoT8{i9~)DEbtk#HIwn`kj zI;Ua-lQf(`eK7IabZErAk(@m>=qm4XG)>m%FkvsQq4U`sgSiryMg9sl$1dVE0NGiA zzz&-f6f7U870>jth}u;AyLW@sxM3w<0r90PzA;i$MfH|%42sRcw}EE)o2uKj!x`^e z1f8{Mr)>b3>+7zlJ}&*pml@hKHL+p^V1*rL5QtVKlFLGCuKFVNv*=cVsHLUL5F)by zSmBvfSS8Q6FVrbI#x&-TCvv7yd}AqNlEQQRGE39zW8qCJliG2*N@}KRyYdT0@Gq;% zVPhlDGsGFsSVbU*CDrZ}RP=b|cKkByZcW5B9FIFPWnyu9KQJ)hN2g>-y3<^=f3~y5 zX&Zu(uhrvubJ~2TyhAzqKGda*!iny;qyF}K+bkVmBF}e;l|~rMZLtKk_GTbZS=#rP z^dn1}_lysWo6DPo!nKdvJ}#gU&PkpT8kyyWG-V>mOI4>?pjjJtkk*8{=bL?gsU7`h z9^j|#cfk$4PWN0-!)QJiz8Ho@+Jq^u1jYfssdsr(weKr!&Pu5d_}v;F>gTOjCb&_- zn-8(-j->%TJ5_Jy7~U3p6}-^*DeSA%S-t<<8?uI>(-{62W?lp!vE+*FpKx=3A)-~5 z0Oyd(MG816p~l8)+HxK*nRPVeg&)-9`eI&mDV?s}mCYEmzjH7gI?;)|h|0w?82qIQ1^?pCt%QYCn%Z!1 z&T}(M8W`!!Em97&uE{lZy_bBv{=cBK5c9gJ~|xV za6G9w*w0Fla>$l(#XU1QuBu4anynw(buo%1u4mH2ItqUE(`oULi&w7F4}5sAP5q^D z)T^`pc(gTYHAu{G?={Y2$ZUvmAbyO7(BUOn((F~d9|C5iZc!x#aEX%MF5}l;+?Mt* zer~RP&Un+l^ONh%)zm~BGzAFTl%;35NSeEp4LXf^+}p@?WlSvwap_x&EuE@O>3@SL(^O;F)(k4iolh3n&q!Hdfy@t&N` zcs}{{ahWk^omwmA*mGR;IOa}l*QKaj?BOAnVO7HGU?6UR`-r59{IwxLf6{gWIWG24x&fAL;9VewK(fU7;>G>)&+&6 zu1oXnEk)3{Frnf;Sm z*8zhXzP&9a4Lmo2^C~%ySOfV(poQdURxLAg`E9D-^|_pMKwWcR^$(#_wkfuc2b#8S zv}jbkvXNkL0=`hK_N$zA&Eh?$2EHbUkpLVq#}yraz}YE_FUh_c?T#1SBNZqENvWJqx zWquA6CTVi>vs?1^2y%O@Z>sj6-NIo!$-yPgoqhEOm|-u@{f}wD|1taJP>yj9%l|o& zMgkoe%vB>SUvSA>+Yf0B!V6&R?N>>%d5cM7NU0eZOf~?@%`CgW0Uh?>wVd`pIO{Do yVOtJfqlxr3U}^YI3_NcQQn7#5?SD?&9eT~5J-bp?s-yv3Ci6()VVSSqSZ$aVi<7`1;;ca6Hfp{-!#f_r%i6KcHX(m3o{8^KN{h~1i zT_ccUe|TK5+xnv~^UtF>@pu{C5m_ag?ywIZM3|He3F;XXkKRVRWodpXr~8VVz%^eT z5qK^dZnA+C-`yRqJX+Nj6`MnA%XGna;`zDr6Y@hqc2e6r5eW)r23Bz-1qcKg$p$P+ zS6NBG%*lb(#N5f$g4Nr>8LSop5fb%wHZilaaHlY}u(EL!raEftqN1=d7pBtTQD#?m zmb9?8k@I!6cT*^ zKexEs2~+7Rt5HZgxmr+gvvRYtvq*c}cydySAX5mrnp+BJNXh&g0{kXSW$o_nEWpO* z<>kfd#l`C6YQ@I!?AbFmc1|`7Jh`(b4 z$22i@@^BZXq5|Jj{2Ls40RK6U!Oq9?XSDzN`o(|yn(Gvpzgz=-VnuP1EH?%V=ODnqp@Ye+3flz zBxG=jN&UTwXf$=<|Kn#R0!vJ+KUboLNrw2pj}^j95dMCO#fZzJ4TFWHHM7#;=kjBW zP#X?Qos`%A()o06CK^{<3i%ZaS7c<#r-H)57muZ+JeJS)=fDp|?DL$Vk&$Jw?1IEH zY=$*dk&$}|IA&&%+}yfYQblUeY1JFBEUo%Ze1w=nB1_QaXJ#zG0+2t9v4`H}M(v5% z+1W80w5EVLpqE1N7zY=DIp$12ZEbA@92NA^5ODfw`DL>A5xTZ^1!I8#4JRvl{N@5~ za3u6f`KsTCipa!t>gHmij5R~~p||(nW)r_`OjrEdYLVvuy9B}s|M%wq(ue3}w6@K0ul28p!2Q)_{H`n2X~0o&7{t#`hbksEbS zoC3o6TqD8%K_mwbj*5<^Xz`ry{D2VB&Duof%JO0Fd!sF`eOvQ}k2inF54MCG-&Db zT8ZfFV=ND6;*jAs9n7m$+{Vc~&fTT?ywz}{y!;Olr>~`Mo1EB8*^IttJ zGg@EJOM9=i6e(iFX zt!+Wb5kG#y)Py07%vu~%S?}yp-Y@cyWV6FuJemJvR+;n zP_SVP#>H9ln({NLs0=ED9pu)P-XX zxd;!i*s`+Ic9WQ3G|nathZT~~GBLpmh~#2@V&uI67|W3 zItgEiNB2u@_=)tYk02xnsA9j@`||88!p2f?q{u|$mfG%1zC3Fr5Jq-*uf*ulxgDUr zu`BwZSvEfH;j!*=z+fmE@TM@CuFql+b^gLR^zD1P`-3A^9pSf))-a*hXD$!bUEmO- zKlJoClL+AaP*Ev9*HKsBo+#W5xteTh*kr@U!s_TU8Eorj4~-aN!;X4?*@EGaqf=@M3HXZ7;M0~guljm{pwehf(z@7MPV zEMM8#gSBqXYiw7^ygZKv&nmkVDknzAS#R?S^B>=k39q#d>(`Cvb&?7MoY8V7C7aCG zP%bLv(!Ka#X9Dv81#Pl_-3bWDjKgA~~SAK#v_F#i4OO2?#!%d6j$ z`MUKUOzs9Rf2|hEhLOK~`7&>RBP8^Ls$$2FPd}FkZf10>r<>SN1wExgMFQ41oXEx_ zBGR8eXxTmm53O;U!nwM+aXbV)NP@48R~ncgYsz|W&4nTFFffM1R8{BC4<0|)(Qzjg z#JhRZN%=(4_v*=t=si;UdY{Q`F$x&>^=2?kM!P%Xp>W_!7LSXgnQ{U!3(@H=Y@tf_ zr0p+b|KI!7!%wffHNRwzrgQiozMQ`}MC*$qHCZjkN0@jUR%kWxX}ObgJK$=TCFb`@ 
z2pErjZ~F19qvH;{D~&&&lgyXVbj}Ov&TtFHPD6jCamk2Bkn>eJ{S^1K+ z`@>ix`+*l15u=eu0{)N?CmAzS-VN7(5OvA9J{|@>d^%MQ_gfR>{aB zA3uhN__I4Qzl|q~LMhiaQ8n82K)X3F{qk((+wTvW<;ez)!Zo9n5k*=u7JEvrDtda> zC&xobSB@mV<+{ z*rL971)-r2)n~`rX1ytuiBj`9e{H}^wY117$oCw(ZLf(q5b+)@(KmFYg(E;HxR_8x4;RBe-&{~L(!%d% zW?CeHiZ;w-g;h#}qkqz}5&%9R# zOLHw1+Z@Uk>3@Vx2FoRvJ%vaf6p>|R^B5cb#E0b9s`3J9vheGueZuL z+zic2z`Xrvhi`dxy>Y)eG(0{cKK*-JcXvCY5)niD>W>DQ=85wtkIoZ7e$hQGQ~1t( z<;|`+Gvi{mbOhCykqly+iQLWc!*9(EW{8^8np56u-EY$GrYMj!wXl3P;HsP!Lip6D zhDXO`Y+sTb9<0P7Mxet!f+50f;AfDRRK=JWe>c!qD4Up-8Vf?%>zgYW$e73_1S+38lsLzDpg%ISJD2-reWXKK|F- z)7(7jT@m^NqH<-W5)+@0S%icp8%x4Q`!#uOmtc1w3VxEYY4fqZj7F?jjS4%*AJs=)L+KDe9$S z8(^zPWLaq#dz|YZax)aSu4TBDLJ($KnSI_45yX7&Y}D`+pbFT;c>wRxlrP6kC{g4^3c*`uI>D_rfm= zdZGqIcWPaO2~EC@XG0AvNulEA_od4&#;eq3qpcPz5Qx;2pMRZx1c~IVFVl>(Al~l1 z=nIBq?&recBFz3!sbqTDxQj@RBTPY_Zs2Kb;`P{t3Ht>Zq5`?j;_vN;hwB3Njx7NX z(w{`aLuF|fANyTK3)Q>s8HOi@lDqSsoMFGdT^8crn>(tsh!OUa*_+77qd;A$Cw6$R z3cLhy;*jdv@h$}FA=Ezy#i>ujCVQnNZshkbyKF*@>G%5j{N;9T#xqOQz=e85NPs?>dqIGVrHDh0~QRr1tn46j!2YaKi6OGh@Qv(Apzpawn{lMeJ(o$y} z3rjNU?B~Y7P`Z?AB`+7FfI!;4?}+YNJqKE=JCiEx4%dRoJFk(y4aWBUj6@oNt;>l+ z=dIR#_5S=k$wBbhGn36E+8u6R?y8lR=eyV6uIG$~M@Rd5N0K=#E+66*R*RL1Zx8pQ z4U_38w|*#(oHztM#J0B!#gGdTFj-BcF*_NXg@$3%Q;9qy%(^%ls-l*i?A!^pvKYUS zpUQJZp;HR(ta4mORLI9X{MOntJ&Yu2IU(ck>Y94Ua?H`8pgfSW-p5f|(iYVi2JgQ; z*0T#Vdv_4cTQxHTOPc(^KDyHH*Oq+h5x&nMVZmE;Zb)4IaVK{12l2jc$X=_!aNynh zFW+}`bs>;&vU#KXho9QoxDP_cj_45YJ0;Kk#5w6o1^2PBPt4TR9O`?kS5#9E)rwEe z45l7%c|78@SdKiRv`d@{tsLNg&sP2tx+UUNxD|byniSGpu4DYQ6#LY{;h0W9?>+Eh5*Y})8 zY%9E#@6@+fe@IO6qvxLPY1tZ-zgJH5UJseBQ;GMk8Z9A=Uj1G$wk=; z+sQIl10@3k65=O<&~RWa-x6{XkkDXZ33y$_{GT1EJ5f)6NwO0>Q-~SKdtdq(V&Peg z$%gMl@0L|@pio&p;#OhnbAmdr_p&hX4(1gkjn#pct9*F5DoFI@YF)$F*~z$wq?FX@ zN;qL^_S>1k-bbQ7erC&aMrK>LSCHUU?W)&W&sOJCaB%SvA=Y|>eWmlsoSnEfr)eI_0!PE4VDgnF2812pLjD~`zkip-$P^ipwXKt zapNrxOMFy(^%ox}y++hfA>m^kG01Ib$l8|z_|_&n#3s+5YkLQ%JPmst77o#!gxFuz zbkbYX%`2InL9Df)3YNL(e0S~9Df#qS0IV4Tk!DrV!(nfUyxF|RzFslAWJ=~^fXWYu z`H`MUue*C-Sy+l2=pfqa>0yrSpwmvzCXq-gmmRuQj0iAEQ!w!4*}Nb+-wh@1x(^R2 zIBW$;B`VhneHkt$XcI$7q`zmqSU^bkOakNNZ+qiPqse z=;|;kwt06(!%v`$h#>=x)A^_yrf6_5wXNgDXqvUC7yA8DlaXyj#Pqx_0iOeMfS^9j zgHFfSoDcNVN%uBPLxZ zbE&0L4}OWY3P5}G+Whn@6p24VX}~4sqXI}I{Q4S-hkz9we{T12?K0F+L;oD?`gMMdUZ4p>tCrtrj9Fw&>`!g;nflsv+u zjMYYc)5AMIm+Wjelzj!haBH)@#L3XD0+P3;_(c;ma*R7LoZt*ux`&ya$FU^ZLq7c z{)*tpq(f=F{>Z#jF0rLjeRFW9)t~UAvXb%g$5PD6KLFKH1Dj)3H5t#BV38m+kD_vsys_kvZ}w=D*)J>GW>C|T7Ozc91+ui5 zN%XFnt`kB>4R*lS0dXf6%7`%vrM7xXQ1Uq%_vHrlewoJE_WBu;SGD1w>TME z^xIoe|MQC3aY-Tc(1!_8GpVjX5L;hYHSz+H^=rD=YCuYixbx;ipK#fy3{+X)_{Ect zz6|QJZZV)m@M5}v&B!Kw@9VuboyMYj^b-woX)%$**%zGjb>tBPPqduJ^Vc_FC9RLp z#$8YDTTXvA4f`KIIwRuk&U09rG@X{)Sy~X+l7uC?zGkSc}urJIeQSjM85o@Qfu?1lI78Volo z>ZIkKrhmH|qD-zx7?v!0Vb`h*G@0d5R+chq4h~*0N+IIrVi6%r@4UjhYH6{aEXNnQ zH^U!>trm zkpg~rPbDac&aV@?NTj6t**6VdT>qYkjCs>LDH6b*=jR3KPFU%f>ax=jdJ{MJB{wK- z(NKsk1lXXf6ctUUHw+|$+nwPf56?O|Nla8UJ~ii4WRR(BBq!INt%-xYRl-~|#T+5- z?PPOc0HnJPjX2?U54Wza9?NqZU54#twYvVtzpX*%v(jp zim~zP$sTmGCDx4<^smHS9UgYvO)m0f?rwhL1Hv5VZC>8iG^+G_m270l&e!*7*DcNM z^{`(m`#Zn%_Bx9QksN+~-!tD0(ZL`Iwo&I&$#V`zOUI&4$jOYt&f?9tTWW*^-#`Db z;Xj2_JTUqw7-WnbHX~0W6+SLvy1BadA1h}WtJ&Wo9oMGwU@J<`9trw9Nls&CY>16* zc^1;-(D%hCu%|PbUXk=->0Es|UeL?*`1*`sFQuau9I9DyHmb>k`nk!; z`{`&X&i1sB;6hyEOF8$Sx^-!u`f7c6ZeoJO@V3SH_m9tAC< zI`umv?uCyLsUF%ML24QsS-nuvj_(TYexSV-iZTBV*i=faSki?2Y79g~MCO;x3Ahqc z$oVQC$K)o)`rXONad2?fmacvmhoI&;zxn)~%Q6qB)Ykz21Ve7!@9~C)&K{oOjqmAB zJ=_6)3ESe4^t~7y5WVltcm-<$XfBU@OaVPTXUAF8P9y(UYYs~a!Wr81@%MMNp6e3) z^9$l7?-7XI++yx_sNwF;i7P&TUKh4Nen&!@&x)h%t2gm%bFe4q&ewmwmKyc!4Dc3` zuy%H1q6dS`n{27g&&nb=2TvIcEiRtBH 
z2k~!_8`L;7A~982#61bh17;Qu%Qdlgt1b1K2{4ep>+^v$T&7>*$#j#`lVZZc!tQP^ zJ&9<OS)lKrMk{qV=ItQTZ|<5CLcW--*w{kp(eTB ztq6ljIfg-?p`r>KWDhNZ78nrz4!T2NIbAo}_Eeseq<(DNZ6&q{wW3phvc%OmjF@x}~O%O|{>z#Gq<*5yiob}`f451&N`>uXDXAb?QV?hJ=I+F?DKx!`fAAh{~ z6Nzmzz84W4a9rW$geNlK{X;Kb||Z2uyjVPjZjvEfEnUc&vi?78f&kL@`}q zR$9m}d{;yS{HDlyY`53t$a5S|STk??ZB1%d2 zdIs-=5B87@(LkXe@#JiTw_GgDN*d??ot+`LnUbyH<&yMwcxK>UcC>D=S zI@!Nscp{LmxBfFVEfEIfh__qc!_;9B)&|OTGU_eSRE*jqAAqe=~Ut60LEM}yG56NT2>Fv>9b#bLCsuhzYSf+{ivb?OWOZ4NOkesB8e*LsHL^f#%c-bbBo zKh{nMqMO|}5!yc}CBBPysP(|e5n5L+fEdyZt#94uzQ9tj?%%R-g(v#EXt@RFu^2rX z#{qV`Gh2}2BJ1bSB%Zm-6<@H?C(?x6(B`~lxr|Cw{tjwsY zrlynKgYSR6qGdsoop|>OAZD5KBcsx~QS84Nt4s#vISt+W14J_vl+qx(l5)ksMn*GI zL`bCpwQQW!_IP$;kD7dGe}Dg4_bXVCWBv69DY$(xdRtpNku`TW$D=vIfVmw^U21|R zZ~jdV#AC6swdU9EfMC(B>&WjE2|wKn6CR2;n@gnD-P?T(xfEnT%3N5;;5UyfQk0h- zWmVO)eE)JObuLLl|9QQ}eu?M8!b0tnJ%)e2WeoThPB(09Xrcoc8yP@Lhe%4S0aS%D zS7q{}Olx>#q$dT7nr2CrgcHs$j(q5Fa|pTuMv8``|L6rkf_roz;)^4;-kqz(*v8zd zr{|rW7M{UnrKYMHj~^>H^3NSu3~_MbM27xfG6u3z2Z2V%SfFr`%5vRdc5}`TBiOH( z+aCkMak4drQU5zj8#7w;po;h90U_Iy#3Zx79@PT|$co$L*p&ZtKcd|0Fh!4^42P?# ze=WQFp{bkS`xFMuP?mOdWX%~EFxj05dv4O`?96Z`0JO^*q>gIN?Ds-0kAPnx zSZwoZNMRxC>J{ZD0ATcqQG0h$zGQx7>EnkrW2Kkv1R#?f#)`tseHR00Y*XOPsR|2z z39!WCr^<={6;VO!YctE~0ub=m4ra=|iHM_3+|)wMRFcTwf?fpxopBlo+U%%hit02R zkax60DG~=|<(>VV?qX?_KOo)ZI9`+;g!OqnUFg$sX?X9lctVfxjVQdAcnNSm7fDFH z2^Ieny=SEDGu20#m6g}I?C|_8KbKAJYZ!I`FXXUI9k8N>5j}p_-cN>ZQnUKHxH7jQ^cIioPX7{_^t6ip6$9$gitk>lK((Nm2hM(z4IY zmjbB*@E;);@uF0>hX&lGM&#rzEw!eRB}#uiO$n=MM!r;xfJf48MSfKrnqr!ma%_4+ zr1^iS7jEwB)JVV{eBtDTqLg2E8z}CsuBOG9i8#QC5D*bb=<8?woACuISBjjbGdt6( z$WO(Eg`rNJSK)tKK|F{MA+*rA4zA)}D17*Hr65C*!=FpzDHLh^x#UWnA!-1I@V;1F z^qz0#fVP^bbTrx3K8ciDhhl;P+vMu^;qC##e;QkgDl0!LP*8>SK!+_G>+6H<>FJSE zQL(fD%?;vobaa=$`Y>LA1{U@|4J@g^_)U6WKdwN6&g)D6rm9g{j4gFg0bGf@dL(bJ75tYct1V?^(`c>8CXhpci zka}NNvDNqNsZ2as?0e-b2~|}LA|j#^qb?DVyVK{(Ep8t)ZhV%`D5*>ndVnwhxWJmy z2Z>6`_4Ae1H}?9ofredyvDt!N8=q2{c0lUD`m$NA*i1!9ss9>OJtssTx(5c}b*c?h z_k~6Uv?2VS2Lu9MhcVIkY^L8@J>-;>r0ngPJFk8azZ3F#@^h(aD3wX)sX;s-0Xd<& zhwfB^sg>eB?QLO(#V)-Gid)6 z`0;4w%5(P%JfujEUsF1sOc(;m&(GIl-CysE&=}HPPN0;mvH42((S9L`j|Hm(ABSfc zbN}$35khg&#grswc7J;XwWtAVD&L_MBFopp{*g5W7)wAQ8AL_9exa!j!G4c611n33&$(#gnHavwZyck=1cmyN;q-(hp4EPO7~D%Y+cEf5%AT)ghM z3Pyz(w0Tyhov36Bz(aNx>a9{oPtMN~Ag71li1nKsr4MBJRZ_xWZf;(a7P-eDD?9WwJ%5D9mc)yO1OVt8~i4xqFn_bwsx%=xZM%gVs;V5PCqK%J_?|l7$ArnIYLr+h?QX-}K zfl(h!iw9D$hVo_JIZsc|3QDS{Ixn~^r?rRGUAtstWOi0M1k>1z4A=3j>c+(-C1E#* z;v<@xn#L}MKaEjvT2E4-5^*CXCMF`Gqsuruv(Hu=#q-$B8iT{0Z*(9&Jw5fV2klpd z6@S&gV6kxUDMdkRk-!IPmKU?&gw~FZ;^Rf{n`L?pL9Wfu&u_dpT`H%j*tZSp(;a>n zz4P+~S-cJ#1GEJK(hH%e9MFuhA@q6u6RZ)NzHqb>ooX_Xhg-LH-?K8Ai{kQf6j4!8 zNHZtPu9d!L=2>W@aj^3P%vaRp!y(uXvac%PZ z{R53DH*zng^`G1EZJPi5IgI;(-P{mT}N+0M?+X8$YB*(!tR+gty! 
zch9i3wY7mo_V=-g3q1A5hKBso(ig>?QqNw1 zP1e_Qm49!KiY&RVwLrkq`m|(;mT0A&_-0H4nDgY*I)4>1gd%D(2S56LDdq>Cm z&z}Z`LyA(GOG-*gHNI!IQnIo-mkpGZlt^f3l{1^Uf?kecMun9CpVu3K^!=~X_LX&a zcVFAt(OsVdRr=}cTAd%wVA3BoS&zXz(2Efl+AR3zdP@29DRO3J-OzYcTpX)gb~`X* zV>>24TU%mc;_=s~c>fXSOU9p)Dd1pG0)wY$_|_MJfqPEE&BIe1f?;N6212h~Al^jP zg-961e>@<^NJ~$@IS`GXlpt~IBv#uO_2$#3Pe}sgKI4@xn}ZMt03_CSc720`GACXJ zNJ4*u8Q>s#pCICZMGg-Si@CdV1NVqYOsrq*sys78D_UM&)~m*lqUWSN=>BC7bm)DV zEJ<@y92u8U2@EX#UU2K*J~oo5mzHq$@b!5#GV(74nx{?nE^F%Azf2q)(6F(w=NA_h z^t>3JPyPWQf1*_{Kogjxq%1ZLSfr$}6{Z-+PKA|a$fR0YrOv%-2BZ?Hy)A6aA3x&k z`H!MV+kZ#Jt^0jK(qc;q$DqIdLOh`VgYr^;&W19!S}MUS^QW^ zv!>Q>^M2nvBQYZb$<56zBP)wqL?q`330c>wT?PsPq>h?^YXkpN2nY~?oy*C~C+>T^ z-`h*7k&snJ6R@>q*VKs!FWd8b5l>WB<{HMmp-r*Ty;&^x`gD0&gGH#{Ku@7@$9lP$ z!`{Kc#Lg}n*rwdzK?w^Fo@M{wU@s8y4*_awYWJfR($O>)$^F^N^7iVK6r6X0Ug%)+ z2oMk}K}YG|P^C$9_4~C31C-|1T2E+G+!X~5 zlA0>R>-<&$_b`cRY-HRjt*|=AYFxRua<4xY7;jprpnn0(ck0d$4bC-oPR=AaV&%8o zKHHUiw#;Kt(}fs!2g87nnql4|N7D{lQr!$tHot3n`ItBI#Yw1Ak;5h^plXZr0oK6;fmU6*t*JyWhz>-bIJbwmU; zD=Rv9!>*>LMhtkVqeFAhgNUY<*8197@bTsl)NQCeH)9hA zlF_i=NCT~dSjDZJDX6St+&m@>IK4{Gv0FjehJ_;#eumi~AG4M7QDMq~JRR z>WUu4fukrYDw3gH6 z?d@eamiEidSis-BN>9f>>b4zhE%y4kGgSx)eg%uz>9MqtA6#L)350pMm8@!PVF3@K z!0FBuENCD`LPhN#%j7x+0CyAgr&zCcc1-U-!N%qQ!E3oQfEWOr!P!cE6AO#5=4Sqb zu4GRl$ z&s|Cp5wcwWlgFoLXC@$sf*=opY;0_>+t0HTfR5zBL4?)78;4TP9Fc%GHecW8i-#hp z>F7j&l`en#=Cb>RHuEO=;$8II3m*@rqk9x&o7>tX6%|oH)j`NrC}q1DrR%_b=nr_#i;^K!T6D z9>^>$E!lZ_2Z5AeAmB8Ae1_7_$kflC5#F5t3h$$(d=7th|J~Ufzu$R9 zQ4Et#RUR;D5V!|&gYKii2G)TEM#ja3XJrwy8n)}&2A#Zf1FaV2j!bnAARYiE=;jH0_g3R|%O6n55Ip9Ji7a@83_U+`%jMluJRH_xgB34#ba0m!#diVqcNRpC$p#CQ) z*tfVyG!#cFDK7r%rGe6xiIWosM58jO|6gv^+}a9YMUyvyu&;ZIMLZ#wjICcSG^cz~ z2R8v&b|6f{;!UTys_d-rzC;@IYh3%i{TKO&o->JOGT7EP(%j1W!+EmOAwH_Cg@6tgOXp zBgp(BKp>^g(#szVgc5SQx0qo#+cj1q*p&3E;gV5R8PR?NW#p*Qv3-ox2qy}!{MLgKOJ@fow(d7j5jyr zlWvpIoTYrkB1gVAx==P=LQ4zx?&hL8 zZS(lQWu>WhWo$1lEPcd7n?;R}FZjWmQ_z%yNxPzC^4$)ta*hxoq{g`C(frbq+4;c& z0lzba<4SuJhUjeq9|=GNGYXMV2Vpi8Coi&XU0zmD+$kbbSC<4wU>_hp5E`o;9`4*! zCn1W0!aX^a=q@cUFW&pM{AlwW5>}515o>b%ra&6e z+UlE1mNGCjStyH9`hA0@e#`YZ17pKX(0vCp_zTMmh*+VuSlYqC0dnr|r9Gwgih>*b z^k;#$mz<%bP=TJY!v<@j#B_IyA%#hRl_1*M+7{179hFv8FvG#a=L5K)VPd2eL(GRR z8%Of0xS006Qs$%Q&z}Q)*9~G3pW|}E6_LwDQxlpRP2j46oLoZ7{eE@VAiy^@zym0d z`e}l8h_M*J2$)RfiF;q4GM*oPLjn0PK&6U$qEJLS^AXgTH7LVNTLoJ2VlyW{O(w9g z%T*Z{CPzIMscV!$q^6eV!*MBD-`sqje@M@>$hiqZ+3>`Ktf3(ph@ij%j35jL^rB{i zs3Uz=F2M5opw1sarF=Cc@K07&7Ls6FseTKid&^?;0^`$$1+A5KOW>UVL=K_2-Sa0A zWcXdx@zw9`;!l-G#~khl^Xfl1QatQ8Sg|+E>_GZiS~sw%ABnhIG5oBHgS0G#9)oD^=}A?iJ>fxE-1jQwEH~IH;apk zdIb<8ptgc>nY6`#ddHKCrV-Kifgn&Q6T6`-3;a3HQxMec=BfrjBq)-I$+5KDaj<#3 zM3m#3yhn$orN#Z_Yo9SOaj6yNy=W?&ST0vM&G~gO%WMl-u^8_KOXv8#b z4i0fZ^%bXOW@Q0SsJ90rL`ew)(t79}DUVagUb1m;U`0#FH1HED2qAPJCAQ^N!WVTvbF&gTa&p!kXcQ`1^u_LH--bN6)V^V*0n({t(WqE{fY zuWfGrp3PJ(6COiCL6Hm$6afK;f_Z9sczhfk0Jv@thn+Y2qco}kv&ItK%lH^j+{}d7 zlr%I&ni-UKm?I~HfeZ_0Vz<{Dq0(=x$Qc$)x2n7DlU~S3?-jM1C0MlpEDYJLA~6M2 z9->OcoVD z*RHIrT&_ouC#0m%TpVYM1Q0=JOa_u@6(@n~f)aGV%AvUmu(0FZ54k7d)KpXjL@vi0 z(jXeCd?6}~s$#w7>dzyZ{xpes$HHQwclr*{IVes}P9y1iM>J!YUL1@I24eO-A@Go3 zK+dM=(I3)57dTP}e30Dx@3(8A7&M~;cLNsiBz3K9!@u*GbH;yzG%(46kdQDbp|~tG zNSk&HGpNnLzf3#NS>Y=HR`~#d5D0lYhfTcD&K@aS+MDm9GNLXc3%EXg)!HiX2)sgq zi3u?U%=j7r78wv(6NK9F|a_o+lhK2$V z17i^zZ-f|B!7)K<0OJPL1%TNAhuCP&-koH|0T^BVvL!j?aKeY1)d!H zU<@d7U!Hg<6wZNYt9X5+LpPRy`v|9c>C=Ic_GpZkPwZBQh-R#l7+}u<;d+0yO%4YK zm*;nJn6zq?uNLX}trgnq4dS*m2x(v(dM|uz;54DD08U(EbbzJF_xynIMcQ9VGWA(l zY4fNIaHYj*7e}k)fJYq!Vxp}KP!I^X=Dxq)XY)T9p?7w3W4g%T;pVQfU*NP~{3w4Y zfD?~FEy4zYQpezdnLx6yuERx3t1clkB6h_?+UmlP^hXZ`VzZgODiq8l)q)pjx9_V+^CDtPEvkN&}#! 
z_it_ZK+mB$YZ{AwIK65P1!z!K1udGuQzBrH#Q@xdDd>5SP%bhUPmUodD7dw?HQaCq zLQ3BI_u(VS^wi+!ZEXHPI6yVPCd$DnDk>`FijW>3ALEj}OaWS7A_NMHNmaBCsAvn7 zuWflIYgk(!fMQ!THsj%((x`(L*S~)kpxt5q{$d$a5R8HBcpVny$f^czZ*QTcm>2by zwFNl8h>owP$ofZ#a=!G4>VZeN#o#G;l*EAP8AhvF(Cn_Se~&G&l}0 zA3PLfwX~Q3kC((wO+f+K+uQR#+rxEg^|A(K1PJ7V%H8Db%w6^Y*ZB)L71B$1&;(Jp zLD(6&LJy!?V$2keP0zJgS%MYQjyBmZJc1M>ERBwh^#k`70_rl_Igbpf z4RaU2)|m8hlF#cm*lO8kd%u0|qpPog z69*Lq5}*_Wa{0a(LN37d#)6&LFVw+{-u{9G6+Ki{Rn^j;pr3E5T!*c+R7&2Z3ac6e ziZK&!--t5LCcCY<7QD5r0xyk3#>6l;#IbO_xxBva?eB*Jp;iijoJ624;b_FYqoXKB zfmgKL+}uTtKqFRP*dY7IdIvz=aOBhNRJs0RPB}^%oY-|Ll)e}$(7_9R`5sz|{pKRbSrQ;km*Ka%n^B&#YeT9cO<)Nj&? zOe1u0alu4_13ZuRFtW0e(rYn_;IJ_Gqbi~jj9IonBG{;T6ZcBNku11ZZ1bfl6(GNL zIW!A3Kx;AyIoo4SZZ1yaqy)7e=g3%=z>UB{78@PP6H#gd{O&ty z4IQ0jXX@WTFDcd?x!(n8y2aUp%dJQK#R>&O$Qx{|&zav9l=rRPI_Oxuw|sB$R6ET` z$5B~%R4b0#p_$9AI1}3{1U9h)&fF<5XnDD5*)4OGcF-!fyWGMRfk7TDKS2~WwpNWYt+ub6TdnF)B z8XCV^ydOt2H@i0cp6ms01D^j{cAEedlvqGX0v6u{cq=_}l8>LqO62Z(dnEuAcXn~1 z6c*0*S`7dIj99PHJ_;aiz_9=~I9Y8Z3V2j!PfttWtb<*<;sA?h36px8}gbd(*hY776=!hTn4R%fWhC_?1ZjuZ%0Vv8;@mhK-!N7iGg=a^|K!>t9B47 z`$F#U)1bvQY|hXR4OD;u*OK=pwrpHnT)a*zsNLn&oeDCw${`?e z^x2H%18)_Y0fIRrGc&cMy1ZNw@Qt80z;s3evN&j(xwE(TDHsLFeIv@kV;fzW)!_a3 z=gBAegXhC?aYIAN-no3LNLV>J`U#K$n8j9w0wIoY1|FNKQ%$0cj*i>@5K02bimZ^77FX#^?OLrzNfTVE$cD zNIyJ01Sr?#{?@0Upn%|+D-EC|20@a-##W6Sd$cu@?6aNi1!5H~KYs>5EmHu9)2X~G z$>9wG$rcqgHB3!ILorD9fU|Uscd$8Yh?Uj4F0AYT752@eBUKd@D4ReDYDwVS*+I1k zFiG{uv7sg301-X2+C?6CncsQ~{w@l%r~&wy`e{NV!0TULT^)bsTTNNK=Uerc1kRd+ zojsuk57=fu_5?3StyD>(zV7hn7}WE@{u)8fB(A8aC-z9OkWFuYA<>AiV=sx2!F}!M z2m(BtU0lbT{MUqC;17)eEoIhkL}g=R8~glu1|wbc{<3hecG1#8vq&T_$21V|$pFfX zjk0ycfSQ5tprpAu&DRgAL!jdcA0OY)|5zF#6Gt)vV*B)W5X|A%04>b;e-QT8QB|&A z)ab?lR7zB&K~Ydax+Nq8=~9sHZjf$8LJ8@T?w0OQknZjjknXO#w&$GR_uX;-xO)tP z&E9W3v0~1-)?4HTfVD!|3+qL3n6a(By*Ibsckg`SSceTUr_rgNi!}~LkwZ0`(HvMNXt<9Y(Dk`e? zhm_%Sg@6izJ#z=F6qfwYiHUD8&Xbe?&D3YAq~fP>I^Qy80sK^@O|}@w_C}cx(#_j#R4OEaM2g+SF93M?wmr z9PI3fuSzD`fpgeJzucUZf=&*3aO$>x-g?~j7Q6Y`@^TxB({gGAWIi$x*`)8@zu(Z~ zf>lSmkKznOC4>~&?Qe;$WTmBCo~-6WSibWEjSP_dB%ZFmK3vEXfmu?>l#uG|=z!>9 z_Go*4Z*BCBJLI$>p`j_3-?BEtNdB*}gQW2{i~wXJ8ag^DJYBzjK}-}X3QO=Dv?sqn z&Ef#$69ed$rl#Tm?hmUF(Fd@}ym}iI1qB7!YzdSWlJl!sF050ItIq)xd-##Y69&r? z`%CeAn!Y|uOl+#&t^h%0qQcK{PJO$(HzXw1qm(>}asNvhgH8)h zE_Qb}&uNa(F<=J8;n{jt04RO$`2K3p3e9s`h3a_!1Gu@vw5dtaXHv|Px`@IfAq zfF-(+TQ)TZVV6>1KOX5D9g~v5VOG+&%iT5WnQEhBnR4l9tXN40MWv;BozX0ihutA1 z?K(T&9ekjw2z-~r!KnTy@P9(+;ZaeoU?BksY*3=~S{W7m*RoF3+q4|c1j+TM72nNZup8>t_A>Rg=83i|M>f*u}(n z+)AX7KOy&bOB^ZG!5j(_5|W`D#RlX28u`58bSwf!Pe7`XN)MZtrGWMUB}>5hJ_Ez# zd1zdmB;a)ZYfkWYs@g3eplE)4!{PT4{z*@T=3UNCJU7?U-}|6*;@b;(`IM-bAY+4! 
z+h&4yZ!zZV?O8ChHjULIZ{R5a@9hp07;8~ZUb(u+y_ zddN{B+Ad7099f|C>m~@J__F15M=EWoL2m;p4;&nvdWfI_-T8F#Zg4RkIK>sR!e_E| z_Cx?30Jv-5c^zB_XqGajqh-$Q0YOz!RTT>z9~+ySolWfZij7tEEp%EH781e*odo!O zNl8h>3IVpw%0TZQYZVpZ+1Xjbb@?}tJTFhz@d^i$>aL9x5Ocow0X73Uhx-+GaB(fH ztYp>Q{?E7?7}HTwqQ2zzdP{<~tpl_M8bm_#>t)m6d;(xo7TIrW`AhJ%pJ4bCvV2}> zk6;42`tHr^AXWMWUeERXq;+P9zdeFZp}QyB&=tT2(P2z<^sg@kz(-J|DoDEz#=dwr zS&{6_&PqQM5Oy!BsflnF^~vG*>stNMs?0pvAym|NMEDx1`GJ{nq^!SIe9(xLQkIrN z;v$4i4%yHr=gy$aayO$KXPiKhYOndShiR z*&vYmm)lWDm^!@(we9L_D=D$8v;A^NQTL>z@=yXjOMy>b&$qRP({`=pV13-u#zq$g z4BNz{{RM2uz|sQnHfB4%n2Vpw|iy4+sCgGv&0YNmF+_BaF1srNqXYb%Fp%Iq?fy zKZsAaAd=sK6(s)$+#xj$jSir7k0>cY8ds-bW`V`B8}s?< zJgxhel35@9kf@!$J{6lfBn>69&Wm2wGK!@hD5Iz+DKRl2h9>+0i5`w}GGP-xe=?H& zl(4a380>F&DU7z7q)wB41v^3h6KU4hD?O1Cw`0QICgoH^7*F5P`iq_E4kSb3jErQ* zIbWJ__137(2&ff|5z=Rxr4RSTZrvt*A8A=v*muGJ z+#D!uC0@Qn0N+IpL8!y-A~58_uV4E#*#IdY3&7xn9f_T$Ce&?D%p?c$XaNlLXLWTK zFxx9zy~1Q+>^!Wj;l;(wkSILkDm@$;wq5_bP9geh0bODgt0^JuSU{jcVVeWvpRt`5 zy%SefJ^#!ot-^Y#5KwnY@yLV%LlZDZkz5XpNPCO?1GbpDC>FG&GIg#lSj6n^{E79= z&FPJC>`4azuIip0ZLhqfc>3V;YJ^l@#bL}~<#Zk?W}A)5)-N})YXpVWf-u+;nvo$N zH6sNJ@{QQ)^QRkOH=b;y#>e`*+OkqcMdAPctrZ#+peae~wXmjd zlkkKlXYR=afY~45=66Abqphp^3p{HSvmq{o=sLg$+{MG&0sNppQ2_w{C%^hX4d4A5k%7$?TwoAcT?) z5F~)9zYe)aR?mo0cK-TUc3*s7lNF$-;@{YVvK8{RG&T7^i+Z$H)XmIl^?cKz{Y75V zB8Srf4J-j9UIwC|tVXLm2pEBKZGgm>0}_U&CE*eet?jxCkQ6l`r3>}L>TaEYT+6O( zkF9)(rsC#S)%NmIm}ed2>3Jgc)p9S;A6LgmNgtA{HK2ao%Xrn!TDqJTN6flN6l!D5 zeto`YMIo4r2dOYXqlhgGtUSDS^fu~-kw`98l0tFu*5Sbi&GlbRuIiFoZuW&yeG(D< zymQIK)Z5wJ3??V`teWutJHZz(ZYvkFE!MbU^KLxhVm zW;8Nq@{fCn*7Bh#Z)x}q>>Uo9mC&IwViXmqE^u>mgJ6flZi6^2EiKjSHsoYrzpy{? zLq(-$6J>GgCxXZ|uXy+5sG>kSM5rGU|IP89IoIyjJz$XBVrb!VHdK5ei9b;7js zF0=+)5Akqv*HJO112pi%$B&y%DiGke3^BfyQXZMv*vOkIK6GssdnZ%HBlaueTSmD4 zK=Dg~4;V;m&g1d2V1Y91hLLPXTb;Ump2q_YuQtVow8QGeR2ml3p~iVi%_f1l-@gUl zytxYmq86mxAhs<8ITGMlQgChp4y%-tksu1;zFcKCe}$UQSs|A9_(O#yu$dpVAQJ=9 z+RxX?ml-tz-8~7gTOk82C@PYxLuH|(!v-J@#EXrdH}25Wl*)i%HrA;py+WYyK+s`u z;NMH+=gNH4VgDMCJ!E7GDNDo;2!sd|(ykNY>L6puIbYYs@CV2 zcw#gxb%PNZZEvT9(1UCDe3Dj)_V0K2Uq`|>`9q~uuVVi5p$DPBAHgw-Gt4}^t`Yo= zQtd<{W%vLwdvf_aOnD|V~jNF_cb*z}JQo1QV4OaK{tp>O!HR=^!94~5 z55%mHtw0L%D?;5>4`3o61HA`1vX-vyTdj#_j{bnhAW;<%x_~x-+IbR!T@cI608|7? zx+19HJUl!AIAVnIgaA@*rA+NjlJw6*)9_iy?0-y7#wH|?cg6Ec!Onw<%-^tcMn*?x z7Zx5**YNu&5g5Gwid@6!+xS2Iz~CY?Ob`T*!uPf%RI*Ag~Il&yc>Qy8c5 z0VK#U6zW*!mX}2!#f37Hm3heh6|3zTV1f9NypUfVdAmngvY-A?+Yt~U1mHVSAi**- zGXp_=Uzxc&LJ~@IGqp?hv#a+3Wh3BrWX8h6%2BPt!^Xyjb)#6Ibsc<5()aH;U}ulJ zV4B^L1z3VGnifd>Y7{KDh`4wPL@FzcfSMp=<>Xk#YAj~2 zAU10bNhv8QSFG6B*bpG=V-BD*AHTP}3HX&5S4wu<=S{`l3>gYUo@!MZEszuUfKqY> zl3Gb$_st?<$$ErL5X0L+GA?`ip`oD_t55tW?s<7;sR4tmk_)+)pkaZ;rX3hamZti@ z2*W1s`b_|VZ~V6r?fo>pR`yfC4`*3Sf`)3}1a)x7(}~Or6qb21d1Jy!_Bb*@rPO`u zrn4TU^bO6;!W11yv=MX_u(S|Ij;kRzhWw1U`2Tn{J$1QWCO;aK(>)BzaqrcBA_h<=$21ioy^6>#inMC>DFgn zayG+0!DKFo3(8+OB_<)aw=Q?eeYcR-8qFp^1_TWZ45Eq{pIQ5loybKoxU6KRGq-`- z5yfD%P&X{#`BeVS5HExP2!N-aGw?t{_;~CdzfUyje+yIw^QRx|b_CeaHZG*|-di7! zfHz9bCfmT^fkbBs)S1^4UVXtMG=-8IEipI}k|b2&zUD zHUksJEQ*dkmaUYX{5sT?mZ`Z8oGEFTQC0Q%b7L|erwHy0J$_FwFQi2Woo7oVgEsTQ z=W>Nt`7+}b)K-&YdFl35zdnX!ktzqBfE1;E^6}$m044X2d4YTQpU?9#x~s=*JcLpH zf*J*>kO#7r2?BGQD5KVwkl79BLMkc8Tb@1k^r9!`+qay88=$_UR4z67F*Fn&q2`z= zYZ_5jHx@3L!Ma96itBDwBnk>d9r`zm?@S@Ll2hMr z8=N}?UR{6$lxPn;Gcf<>3kPXBBH1k;BET$AJbmg53I}ACsipOhu6pak3HzYXS4gO- zn3zHUIJfyf;LTv>{*|! 
z!)f+&E__BEbWm0-dXu@%fa7_7ThHB!pdD!4jTdJ3YJ^Nt_;j0!?Y~;oxhFkrMmoZ{ z6ENyJ5O<$J;14zudbpA?0m=wN`b%V=usrazfA_zc!1e+g`TaYm z9X+5RFsq=Gg_u8=;p#XbT96dg=O`8p5LpbB8auy%gS`Uhv%R}3hei&;#2sJ^m_XqU zD}kl$aSKk8R;l@P7Z^W;!D#I;#S8QIBP?1E^RBk36W)-C0obenWR3_VuKQXng70>q zYH`t>0$N(7OVXk2jaWL<-IkR#&=w(O-OSJ`S*3{4XpV&kL%Wc!2%d|o*B(#Hmf$+zOU8R5zvke)GypylS&Jj;p!XC@iX(}}EJ z1auKL2l%GpEMw9zGO`7Z8wHFi>`Z-N$9G7mJu@sJ286&JBDgof zpf3P(61aYLNJ|9-!1mGG(jxWYo9@ z=t!r6{-6z1`a*hoX&9avw?S=vG}2Y0-HZjhD^o^qW99+dD=vn@{@~Wua*C_lUm+>Y zyi~t}`EOjEI0#A1Xo+zIe3=12!sD~uZn9$sH|TwWMaqTlo>%)ol4srG!0=0LknhCZ z?MC!lTrYor5@5s$biYQ(|D+W)0j{9T75uaJJAF*rqs42bGtt zJ%mLym-xNZ4rK$C5Hc~wCv(1!nV4XJLPdBV-&jrPXT(Vw6Sd2gs?1)}<~W2v8nNK# z*Tn55>H|A0gR45azj2cw;G#fA&%lQ&?w^}ZY3XcY=TJlCf1|9(8W6Zp4H_>B1_qg9 z_ufk6GV>8m(kv=P;3B_4!*m-tRT-X8Er_C_q2-%g7LVbBGZ$nylJlG--ihSvW2(8< z$C_a40f>04Qsa0JVSFgWh!01Ryn+ddiY_Jc061X@j&GCVJPI&`f_{6xJ1|vj4Pz3n zAi2XQv_c~OJZH;b+&U*eU!WJ`@1TP;v z6G7g8*uU#8sg0EegM3}`VFTs!`g}PlKXGveR+eOum@bSK%U6#Gvk59*1@5;@-Kj@~m zz3{q}CZ_3Z`ih7Tmj3X`V--%zuPI0P82%C&(~a@90!?$1IVF{nfOu@$686%nu*kw zQ(grB#VwtU5swj$DkiO!vT}!G93s-6ZTUh>PJ3d`l{!RIo#P0L7px!FIxx5hnU|^z z$+q5<-FMhcUjDtbWPKKB>da=!tk$Wtdwl3r@V!)jw~ubL(1b9vwTys$#o9()l4Rn? zPPfl8>1Ga4pK0u8>pE?%-MT5Wkd^r~Jqp`z25vP3vb~ z(OkPs2l4SkGP()zAs51 zxzDNs0=;gVjKmQ$#U|o@4;%B(md4N`;R6jVy;kLU{n_^kQe`OS+6FpFv}_DT^}U@T-;E=- zp4?QHBpE@jc*Ebr9;na9jEYt345<-jY8+H>wQVge(wAlCRdI~usRnby6)MfG7?=vYI)R$=lBQWCn{io~xs~{j&MT^bmu?F;>octSBZ^7wPD&m%Fx; zq8If>8idyTRJ`(L?#R0N z;&+evYPrq+!%UH~r%z~JCQ3gU&z~+ABr=Qh7cCBQet}g^R60SkvMD8&w$mS+sgyt2 z)hfY*NKy)VDKMb-uLJY({y8?~Yj*{Y;o#HaH8fI%2r5DEkZ5NjF=aFpVim;es^kee zz7pb;a+m`5s&Jz73H738^7%(095OjG;YPU&=PZ;&x^p`kt zd2p)PLF#0JNra~0&ng>?``XXHaiR_HMLH+p`KhfNx>pmWmGvus*e@BmI#;FtF?5F0 zt_&ZrPgyB=L{CvadNrlhPo_S!u=DjX+Lc0R%iVfu{nWSZliR`7o~$CPV-F*tPe#^V za|b3$SoJUD@aDnHInw#9Mo3jr@P>7@I(y34q$5Sjf>RNEIynKIO{lTHi z09WDDj12$(qJglqbn};@T3nYG$6to>go@+f)BBoMhfs0_1BD1OZsH}>Z zqYxO>9aSV)o6Nuv{OtXFKc@JZ(Q0ASG1BOY%BA_L^-~h3adR#(qOHB9Dy=js)KWpNAo63Ow*cE?BZbeXq2A&jlAi12};P?=MzFeFOMb?P(e#$F7IhU)9 z^!(_U*=cSq!gMFNM5IYFu1AOah+?O_I*j0q!3|X_BzO%@OIJhC|G{6kFrf6qZ7pHg z@7IW)ipS&!#hb6wL>|nI|@?)UOqk2tzLr@6ktl75f?0c|ZqbFm2iR9`5*SQQrQo^OTw}1Y?Kl|H} z8AC+bWsc=HvG$>2m!+!{N@dNBZC)KOS_QAi{cc=I^*led^R>l%PD9|v^TE@Y&v`ie z`6O?QKa8}dhStw-l#)9S?y!WX=3Z^R!EnZIjw?Xb+1xYY*^g$o(=0{3Wi%8`d+yaj z?o0Ks2eE+R|BZMd#uH5VKQ=QkieG^PrCbd)tB*0lCU z!}61pqV;n&>VpQI?)whQF*bzj%Y^tdJuSH2LJ|hYD>y@E(@ZH%f?*jk@ge@gRp|~k z>-m51oc2@Y5)j=hUPimBkp4jU!~d9*%11o;@S8bUZ#c4^w#e7=BdHhB{Y|po5RA1<{D2v zK8h=U32?LYyLYei;%j{3P+U>$| zUrfP{)9sAo1utphg2qxOI&P* z-7GPDn@);o4)0o!6vV$?QK6HbW^ta+ea7iXuV@!cUo>2s(oo;=ZA;4A&7cNsn1CK6)h^YqZQBU(WY@qZ6sT#}ACB-9xkO?!t<*8hboWN5^7k z(~QL%pVNq9M#d~{rX{=1i^C^6Fs#0)r9Xw#R!__aQ~z-B@$W^Qd*>zUJ~F+*cOGOh zMMeG1d*#;oWTLHOhAp=vB(V3s>ARju8Xt$n*7cY*vVV&y$&q$~Tl@7R{b`Lpt>$I1 zZ_2M<8FhFz8_c^dlkGWfMxBMYy4wnuSL+9wun>UbArV#ZViCRS38)rJ#^36y1 z=6TMymnQ==cGGX1FVQD9W8Jph!T)qPx(jx|_;H(6U+9BCBR33cHeK?aAKNpGa6gBLEG=Jwzn{>V666i=2aV`9CXKI=Y{rr2c@1huLZW-b1PQMJQ5B`PC?2 zajut-kupye{~954^)I!RBt*S#N`W2n>`A$TnI0_SM_kj` z!{PAY>rpSPWS$4ldBQ1n(=5$)D`i)gboD0>iBT!_7FNC_8ZOUAMSXihR!&IC@gO`< z&BVhak)i#vIzY0ZQ{6ZuH02u4TL0Z!L{50?_FQ#|5)cqhTpnu~X?C(X<+XTA4P=lb zE1!TelOLx~w*Q{cGx)1#f>@c3ZiTx)O33AOZe_Du%y?+4{cB>%FZt&^-w(Hguzmgp zn}VGw$@%IBym3NJN{4A0sNDb3B676(hj-_ALPY*y!w-zzvf9yXk32JFS{ zV^*S~rqY@}4gBWEjM4c#YWS!BTIdU_ka(QLANLU%^Mgkos04;jMO3&C>0G4_9chMb zHjgO;J|HX(%}z#|PUL&v4V#Xr@Yp9cT}O7}Ab;{CLujv#o;N<{w+;+A<<6dASFXBL zKIkotN4{<_rR}bcZ@zinVem-P>z|7*xiHMYxqI_HCT44Q*U(Y0>N%>ee95kv;NCG4 zs2F2R(*%X(GmS+O+1T$meq`d!Nm(!!SZ2|N2dt5*rqWY9WxkfK_q^IY@TjztxSY8Yz2b!j)P 
zJCo)g$;oc;EmIQ4sgsoIdk=M18z-S!R!UT6+|^#dH}Q66f`)hyb!rAjharGohO#S-WHKP+F>%>DR*O|~(xGgGapBK!53PPkBalzH*oG5oMEB8!uWftT8b5%NE#JB}2md2R!MYmE|zUi{rZM zJH20?L*A!*5PpZf-0=z7L{h_OiR+CBN#aLOc~Awk@2s$Gr*D2OuxIO^7%|tuHkW=9 zS+V_uw=1U6Ej>^}Lq~l0qT$aAdy|Td_UF%k$T)1g6?}~oU$c)9V8a!)-_J{EnwI`( z*V4`HE+=Qlq~pyy)?$o(vLiJYM3Q7%BxfC=9$sd zTcji}ELQ&%h{i5u25lzy3_iA4=cF|s7jkswauo7nFWXD*_mrZyvt5K=TWhRDAl{W% z^f~N2TIfbK9O19FR(6;aD;u{*bOy{o&oc%>L-MTwIy zYQ@W*GRZ(Jrn&0)^WwaU--Ux&|Jm}5?RSHj?e&Ao$UsvoHto{4I)j{~dwRFsE!aAcdev|uZQ zt+lIdW_R%<7XaMm0SyavEB zD@cg=Z1qsF^7fC+#?Y8e`=hSc#U}#*xv;UiFP471v%b{Q-l^NyHywHDL>}j_T)02J ze0Fla#@HSI4M)t1pC%1&2oO9-TzSYQJ`qK9(F2G`b50Q{6=6cbKpQ-otUfpPDP+ZZ zI>7%C6Zbu1Cn{HE?ug-LN$r*Ba|kGR+S?RWxZUPz4Czt570H7O* zsw!^M&zA1?euKk&^`Wm}K|+BZ4Gaurj{q}N$5?Kge!<3QF|b54<>0%$%ZBT+%E=^L z`x-!mUwLAuE>Xp6cE>`&xc6Djeoq9hqHJx9X7?=> zSlC>j>4;{57*q~oP)Q}uvWtrtmk`RJ*x(Pov9TvdX>{0v!}abV(|I$EeYz=vf?pvM zGi7$&u%&~IbCet^*rmGI|9k2G~QXRBN-oIaAhx0pbd_QeI+l-SQbF0Ij>BI z>Q{P)#eqn3=Y;VjqW z$n8l2{c+*y?!(yl2OPz}F+Mha#aY;Cbzr``TCVD z@p>Kqo-0U0DsQ*k*40av(LSL>9Z`(BIhI-?rEs`*y*@Qey`*mGA3VW-k6f+Q5wed| z+8c_!C1FkR)_wiVT?Dps1{GJloK6*$>iXxj*x65SVRc>1dH8#j<~OU?{Ok%8=GrO1 z!*0r0o{o4@^j-d9D>^JFO@>sdxG_aF@FG%iLX}Z>eFo!+{-X?h!kFNwxHf;Wbi*O} zim8boCA4CPsK%uPZ>-dP<}~@+CA;62=U;A|ekyN~KEWdW;woc+u3@^_xWk)2eR&3Y zp|4xrQ@IlX{t3pHiSI@^j0W-*-q8Kt8O2SM@>aXbQ1ZRf={pEj0=mwpuUOmdcH@Or zDs+*mHF|ji0r4SOnHv$XHX>-WV+sHqDY&YO$-`S^d1@&^>S%<;2H7C4m0OrrV9Wnj zwds<1Bk5;eD_a#Fs+Z6R$50S?@?G#O2%YI(XYGifu*VYWq&~_qLy;VuS|+Cc0db_C zMXP5s3r#0$)6SV(sKg00ril38{N_TdJqx}c{=6Yu7<tMdSwQ8Qo=lAYM#YrpA9}>MiPZ>-4)%O3tY_BwKn`%&y(cTU z-Npv!-NJUWR67G}(M*Mc*Ht_Hz9ZW|YDuPY*1G6&((j~aPi0BgM+*gOE5x=3n9dDW zE_1FCD2=d~puaQ{_iOs-RQGmT+sIsf;UK$js76d-(2Y$0>1JSDqGoD$2+rM(v0Ii7&gj{Z90FHRM+aHNgiO;ewVB)L&)ti!kj49G)%ET_et2TILfg&iPnpd` zrDM=Q%J|*8cnMFU`J~fb=bcEuhiQ7n=u`Obq#PL_Oo%sXA1qZU2y3kVZaQGu<+x~; z54bD{6LMX5xXcb)8NK$a!E??WSLPEXPcG-%to$t2SnldJ5o~fge5aoU2cb&_he(i&$ z(B1ZgwDPQ2=Na$wsH}6%uGaP1!_>;1{e4}~74Jy6l8uf=;p&9`EXuZ$SKJ={TuQ`* zD(B=Ro1Dy1@cwD# z>BnM6cCENHs{@IWRkj10L-#<-8yCfRUDf3(?fIE?Zv`X7;M#*s-N7N@^2wVO_v3ND z|EC4`m)>Cc|0M;;zRHD|hrBEHYn%|ZKCyc9=WQ60SY zdeAlB{LE9mvO(J}Jy`a=U>Kb}crIB)#IL!n>xS5P;b|)2RY1;7zWnt3tWw9=vW-ek zD_bGt+U=FPQp5P>&G7#E1rG41ZG-7fn_@PL8yk`!=qE@$Lp>EK)A;ehisUc_m4J1B zwLe3OY-S@4+rrMxCzi}vS_}IoQ~GBY^Eau_iKL|FXd|4dqr$wc!sV4rI+{>#q{Ae>mAqs_!V{bHFr zEUoH#-e-tdv7e6hO^6Odfp!WN(^V@Hq!MHAhaBTjpT}u+9hmv+kVW z6+8oA65Nh~UC*QmK{&bpI<@|hfcf0xVPhaEHgI*4e!lvLb2$%~F zw|^XVmW{AkYWZ3Z7!C(W#yfZH^B!7T9q$;qY%ZAoL;thX|4YZX%-pK76(NxqN4X zY?;aA|1Ub|u!fHRgWnkc^uXQbGzmjqTN=9j%iFiu6p*XO9tyMOgUz1mvlf$Z-vmhD zJtl^QL7!)wb!}7qfM4kA1q{~iJk_4wVYH16ZJ9b{h4*`V^rZ!v@0AgdcbAtQq@Rxz zyc^E;jthFQ*wuwxgG$v0goqziwhD>eH^tZ1{y?zExs6x|%DIEtkT}icB$$cpgZQg|^BO>0uQKnM zHh-FTv)mfC;TkumSLlu>f51tss>%&CH%q|VbG=%Li2Fx|+xM&4Y&cMbnoN`#K7!Tt zl2EcZychfFQ{yYILga7nd`DO1=5|%P;C08v9zZpf~B)R5HOLxW5CJMN{dtyXRDl07HjE^E& zikjdsZ7qh>y-yt}+$&v^7Z3g74_5m~^+G2?i(wbct<83VhU9p2Z+eepCulL|t-I@{ zIQh|pC9qvH>#eseEppoz`i0+d&r?_xGFlF!JG4!^J>V0P*t%Gj({<(E8F+di5iM?O zlqGb0yifG!pw5t)g#ByURkW=3el4wI-4XgRS9(Hl-IY@DSoKzyLyf8z*b(xsZH&4M z?dQr0<+=CZIU1iRqKKndV$^n*`sX^?MEAH2nz8&{c%C`N?YpSg=SmpUsHiU2SZMkl z>=qSq)EX3=bZ7SV5=Wjqp1feg+>GPi-WV+sj6F&qm!wsv{FfllsQhtf@w@E-)mf<# z)+uZL?@?EQKTHM~9EwrIQ?+sV%@`Vg8XfG}%mqLnf;b68pPA_8jbvH5d@Ukc-&(`7 zDWM9Ykn@3cEd1Nw5^s_jAUjFsb~9m#sF{VBY|7jE0+Ke{TgLLxRb#@nKt&u80z>*#9D&xiw4 z51>L$6DfY`K5y}rcIE-oySLsvvpRo9YXi<3gf87RJ8HhW1Lk9&-OcU3yL3A1x%I^7 z?7iv8G1*%0iH*yz$&Z_r9kSsT=U8qSX;b=6m8G*T2j}y2V34`A~$dJvN!vmpNdpE+?g($Y8;#fjyps{OZH3+(+GM zDfUmv9x3F!+ivMPopNeJlN?^`o|!8AGJ!7hSUr%S9u~X9@%hs|SL^rwtCvx^yR9Np 
[remainder of GIT binary patch payload elided]
diff --git a/doc/source/3-Pipeline.png b/doc/source/3-Pipeline.png
deleted file mode 100644
index 5948d1c4ad4235efdbfe6caba5989eb2201cdf72..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 46678
[binary patch payload for the deleted doc/source/3-Pipeline.png elided]
zVvssIGvK`p8ale0tE(9&P~NJ&L2l6URh}LlX_T2wBlEx+_UCdCyc&o)fz$>YiWue; z5(2D&ZC!`e+9n$}NoNXFuGzN~HvjlfDCCtdF)=Zqr3(%TDd=T0DjnZpzAFgOlW|;C z__4#^7!`Ppz^SwsIZ!#x|)Kh=G>s{Y*q)ZH%zj^6nSf=o}Wu& zrxHNJ%BU=8BhaH%^F-=-o2Jhe-JvZmE&UYHKhGPO3PWD&`uh4le*75VxByFaD~8K- zAU-p`gE&4HwTFW-K9{iqE`WvOloO?;3QJ~H1ZnH5n;2*q>1beBH7G@cq~mt);$7HJ z9-;Q)SL(QlZEbBiIn;VyYo(c)`kF;a#A-O0F9;RM2|oSXpll3bV1-#KL|ZzvP!uxWnYaM?ui*Rd7!1HyToMsPmt}nKMhpTzin1y)HPU9m{{>RS(jWi; diff --git a/doc/source/5-multi-publish.png b/doc/source/5-multi-publish.png deleted file mode 100644 index 8a373466205ba656c750aeeacd1f494caadbfa8d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 41915 zcmYhi1z42d*EKw}bO?fUi69ap-5ny`-AH$Lr*tVH-6hh}AT8b9AT8bSo$>d7-s>Bg zi*dN)oPBnzwa&i7+^h1m7g1X)l0Zh=$S>Aouq~%1cLe;`i6m|rW1fKk({Jt#E@2yQ8Bn#aAZX0AP{nhl&Fxh+x)?j zuX>_$tH9&y!-_Ef_hFgv!34P3HR%gO-2QcFa|(+ry>k7Je-2otT{LSQ+4_#=>zoOG zR2L3Q5mYGsv{rq;Q|;_BRuN*y;T*$dHQ>a4=VWBb5XQxtH$F4=l3cRsa^ouIq_r`$ z@zWQ7n41$XZ8f#_-`*?7cit<{t`e^9E@T^=cE?{~VgL7%F;uHU4g2q9_QOa}SoFVF z0x290<$tf#bAs6a9X^)V>lgIDqiOgMssDE{>+U;pivJGIk&1yq{=aK`ph^DUrN{5M zoBW{vsmSZ;5x@502S0zDxQ#!35-FUhG71YbQ?`ePo;XIWsIC1uCJbWy~ElvJ*IG6|!U#+HKg8FP)-arLKIXrmT$X5)?)o0VZc=VL#HK;|q zla_3dAYVSxgyn<*+s|6De;L{qs@JJDMTZW~j#HoItMUpk!=eqxz`($!5qgc!g9jI} zJ$FWlf3Rw4@Q`VsYN;5QOG!zIJ}V)`GvSXr^v__4F_3`aHpi&f=4C}ifB6~vEidcV z)YavH@h~tj{Q2{TAZg=6?@P{8YC1aPo12@y!9fvSUD6*vez?T}PoOo$2!*q$Cz_j^ z!-9vQNkEQ@iYh8Ek9tG?jxZ_^k$U_YUhY^CVQg3qZOo+|Cvjl(_IJ&4@WIU1Hgw8@ zWSsS7hHPj~4kar)J6O~+adOI>?=;?mEQ^BF?mu79&A+73%jB|T$0%fyaWMlP4yzsH7p4-NzgSjH7uizUbM3}szx`YbbR>pK?Q z2{RjGHk9wevqtkOD>0EUh}-?)kYbaQX+mF_o0~T`g@=Vb6MUM)Z_y>g#R2{^d*}jw z)XS%Z3Y$e!O{qwz6r*B*M?u0o&Lzu^RZefb%&e>u7ZNfuNL-9jfo++QVR0&FB11wD zf$z1p@=smm>AWZ6y5Z2Tv-xHrXrDjSKir*up#Ape7CXeheKYY6UbgNYVR=L@INef2 z1diqXd;4AzSHa4e>QQ=VZs4#x4+ASi&cmZsae{4=PayT*I`s_?M_UB-%8Ovu5>f&` zl9ZGTYse7K4Y#;4prlM)q_GlQKXJ2LZTJ74D>h#7HEKUdD>lIw4j5%)Z!xcZb2}%g z;jamqY;9(He=_En!+F^Kx9DgL5G>TMUadv*9JL?TuMB5_Yzc%+bi<_9O2r;F&pEHeJy7r$cKqb00ZUmGPMwq?AE~l)>PD09eFV zz!4LIYHlNSD>1ceqRNNa?FgfV)0Y;+~z!`O}*#nF0FVjwo4NUNEMppFZeT=*7w+O68eMY z=QYD}ihfc?P3(#~I=+YIE#|_Kk`Mee;98PDEt=Vz>+A77FAqA5V)$O*L~k-_2KZhV z_S)Yqt)wNcg((8FFKXA|JpGl6G3vLbXMSa0a-TZ5)j3_RRrP6v)aNxQc`|~E_S0tK z!Dg(4kPan>&bZ4RJI>AZ!a$iy7+5OF=`syC$mw{?2QWC(>X%$xTp)dQ-(Ku*(Xo$3 zbApOUSP~?i!l*-N8)ai|?hlgt%s%?PtcgiU6aoU78PLiB z)39W-xw#2}V36{1{@G(0k8cC>-Wkb6!Xg+0^#Tn-a7~{JlJd%NC`igKE-rn~@XSrV z`uASmc#ZBl{k?kt&cepVcDm1>DTg6-@6*;iL|@sAgVV15_Jy*FN);locK1*Nq0st; zN0m`8hO(L(QZUuanc?oANF0v8Bykg|KG-z7Ysjyts;L?EMw3uWKI@tkep3XrSc44Fx#rv{nO&#WJjSqaXwRc}>B=7g1N z)vtq$bU+Q9-u~_UE*p*z7AVkxby1>?E9(1!&jKKY6VO@>Kt$J}OV_RwaJK$jA>a%0 z8XCw^;(@i6GdT?nU#P@D)QUz$(y==`J5yjD0B_XaMWMneq&a!h)8QF)CtR=u-1x^2 zmXB%dQ6yj)M5)AayR$r~1IYVw#X(to`vUdAg~!6eVl$2r{Pl#r01leh4j1Zkb8|%= zHNlXpoVHSt+l-HsMh^vI;4UiH&I>couUs_=*{x>%%F5olKp~XQJ5cX5%abRp!H9l+ zp7RsM6$2>-6!Ct&i7kd-hpxUcx#HrpV*+h~U3Vlxdh`qo-KSfFb6n6t0r#MY>h}}1 z{xCF-Cl1Y-x>X7eb!YFJFPc)uCh&L{lay?r7zOWoGuOh+%e^N@&r!v86B85Kw{MOA-zhF)a5y1U8zXbWBY#~UY}X#{ z6YtqVsQ*n}lyzCB%FkGzljW8Do9TNys>rHnIQ-TFkq2KJOcW{AO5LcYqo0u(B2Y7R zgyj&h;(ZGT9k{(%jl12;YtbLw?m1t2eoK@hivL|i;Kgd*G>ol2}eZ*FI{#gi0gIzLR3k~ z8xU0|u+N9T@eRt7=h%NL%3W^dPmMY_JnW7p;US3rhy|`?2l5Uop_F0$LR@lk&lGh1 z6=PxJf%g)ig&^C@0>5|>i2wnP+aLMyhpl#l-^*pkgJ+b4)!d&xp;_K3pi1WDd$s5? 
zK|-jW2eZbDzKlPZt%AeD!|NLuFhm0L2`ZS_l~-1V&yejM&O){w=#K2;^iCxa5;dB( zgNs-AYDg7KHDJK_Dt{kL4@*%po>vDI@@Id)3#|I;iQ6o2G1ly_A89x(NV{kcB5QL4vl}0Qk{U-qX|5a3F!( zO<}!rcC+fzWHe7ReT7-*En?j(Kit1_);(!km9A8FOt$y%E^(hO<4L}biZW;Rvzy@f z=qMy*@RhGFxKD$u?|V-l*iQ!3npDCK_Eaw1RR0< z^=l**6%`81U0^?CWMqe-XuG?($2I_A0mXnGKtVzA=V81Gl2uZI0_cLIjEoq_P>M=Q zR4-(lIf+5WN?v&Luwqjmx&)ZTz`|SIW&&^=EfZ4=U0CcT)kV_CP8T2*f%8=^hnnu0 zwY9aUTvEr#YOB+SE=CWx7ee05>O`d_C7V}o?)bF@e%<<*q$OTSyggLZl`~mEcX=Fl z8Rr9TdBTyyCJCJJJE$)=XGFHTJ$A-8LDXPHPPU(r;i<~Y%gxNp+V`ePq4CbTJ!i#U zrq@Oafv&0$7O>}<6SqN?`l=JR{omD&FPFJ9`&$pQR&_!WZ=lDqr=feakb)onOw6I7 zq3`N%Iei~_%Tz6Y8NEb<0x(dg-BCuN#X#k*OqHG(qc`?;`1s1`WWC$2X^HmD8(1s? z0{|$XWf%}4EiL-Y6xs;zC-o*52L9{Sz$|XY99oc33BVlyYCnAxRS250NU8+%0DYGu zN}NiMr*HyU0T)0cyVK47EOPqktI}Gj(kWqFF;)(aE{j@W-0s*r5$}Z!+KA#<_8T*U$087-EH|X$``EN( z16^D&dQc#l(}qeq;32fW=m)8PE_IZ?Y)Gev@~i-X( z+XTl}jGo(C@RxeBDmr$!lrtR_4}r>Q>K}iS7qSMm2%H4IlY#^o<*b+y>hC)@v+D#{ zRZ&?P@pJw9ZSl}L`_WcdD&h~U)_1}YyWgY1{N)uC{EnTFTpnb;mS3Ej+}zAe(9*K>-d$?f1F$SZTRE2cl?Og|6Qar)B70I+kh#>!`F=|~ z(#Ow>GE45N?u)u&~MQ7R3JcpH$K|S{-gzeorsDs{C=RxythURz)PHu;&QKMXmIjhPla!V= z+8J$dS#5`zoSFjlRa{+0W~e9x3a5evNidM+7ZxZYa_-pzJN!77-KY$)nA5Q+UJQh8 zNY)DH!Df!z%YEbJdUHs0}&Az+w?v7 z0oW|$-p9PAzkSOkG9m(MH#^QwL~fDY1c1QNJsv?q4S|dQ`ICE^BpWNzrV$;G->xMO zG{MzMM^Hr%2nc6Dii2~39is&+0gw#Yl+lL^agZlL+=BaoI|16i6}sau#*M^3E1k>wnJot6C2W*AZIzP@{e49mQaAb`3Pj1&hwfO8`4OlF$s9 zvZ?O^B$TiH){B2~db+24=s%VSfItc6D#&8s1CRug$yW2N4_rLuDHQvY7)xwe386*p zYNcJE2spXB)KVZ4rf}gz$C_OhkP-vPi=zJ~RQNkpRJH19?2AU#Xv#P2D`{yFDLSd( zicNi&;r2EEHQpXOv~y+v%KqOQFE>zPpyFt^g0^ZEm)$Cao0}T~Uoibvt1+2Rn&QQh z0})F_1}gdYk|7|i%+diTGzF?>qi%1z=5w2REye)u**8pJ>>%ZW%c+ChZeeLT2G*cy zDY`onzt`DZ6_m&*aX1BBK>mm)w*l{M92zJl9Jr*bk0WiaJRhJ%45awOO)H@RfLoak z(KVk^$pb}3!X)O_)|SvjP#&|S`(enb54k+2%AA1WYQDC(Mk7<{)ElOLEkGy1eE=)~ z2)19zWpdbeD3$H>hCXHoP`VZj3$-JdOHVetlv4<{#;zYLi|zpA8%BQbYh zJvguek(kgAAJ_qj><$3``97bKI`AnCa2U?S1~;KJ6(@nZ+#G-KT7S6i0=mHcum|4wC-@VTLFyvNg&jf@rGi&SMnSC)$O~QW*!|lyEGa6)BOR${$*GSZu2V^4#&hmRcBck z4Did6wsJrqdRx-*>*2yzz%c}|;G0he=mByBkcmRN-4qB9Yj#DlZQ0@7R_s87$gQhW zbrw}nP#DA0V*|6gMW-V_@o2GY)IQybd_2-Rkc#-~fEx~ECIIreA^@kRfiIpd9x_GM z98h^PER_Is?cg~pjIX9ftb|;(Az@~pUn&ab5c9>`A8La&qiB-#MU8H*)@!Zg{C74= zxhm0Ad^_uj6g>)nJ+m(yoc7)j^9kE00y(yTTDH3zRS$9LlPD{T*Q4&_v293s1@dsg zbk>ZMhBrT$Z1`OIjHFVM9LEgF8l|DP*Kn!qkYPf_R-jvdI-LKBvz!>L{0a%zL zK7^d-J^Jtx9`?S@Gt}x;H}k-IcaMt2KIQ0sqb?M16;kWSG}`jZ$d#8a2AEO%9pJb{NP}Np%31Y`qZRnSw4F2 z{RG$MbF_9aHAPj*tu5}7FOQG+0^8{SB0U4MCe$g%z7w0Hfv8t2l3g~#1RadD4k7>} z8yL_;*bGHB9z$0R>=liVkK>#}q*)bBKaC^^s3JOwvhrT)_-|FSO(Ymr=RTQxBY= zU=3#6&Hh~CbeVEmUA3sdCU%KMxteMI?;;ostP=_Pw0(h$^ytF{s_uz{`n?JBZ)??1 zXzq2qzfU#FE60Y;XV{AHZVS1X5`w@KqLQCZ0de^e71=q_J1*lZqJe>_e(Ja`_7ItK zdzD&{+wVYdgTY*>py01Uy=rw*FY{*iQorj~b;5 zKMW^CB^Tnr1eFtIZv8~LJfNF;KuMdmWX%Dq6?}E3bo$V1&YmCvHi|%gLqSEgpV4ux z5?Ox{>z0ecYq&N`Kj7bi z3|Ctj@JhGp8u@g~R;+qEbnyH~bM|=rv(}fb&75k5)xRL@97SYZM)dNY+Fziu7*sF# z6~O#oFFK4dluO(Rp=n?{}QA0D53YZkt_vq0n9D?upkijk9E_C8gI5#)L^x(k!Ec?ow^a4XuE;vja$Vy;)*l%XP z`+wr&f71c09ANCi4u?bNUM9lgE>&s*<2SfvD2elW0LepV`fp)bI51@q?paK7D8soq z?QaPe(^WT%3*MZq+1CTke7^-WNiV&7aD*`4A8n zAGoC=781_&cb1e~cR(P;kfC|bV?^Ne^nmG=692+b}R66pp|vN9oBsS;=Sj zD?7)*3y$p6Xzy?xxY5VlsDh~hIZd|_5oP^AiAb$$`^1zF3Oc1&!T@Qm!n^n0b=sku z2XobEk~Hr^sEuMOOP?y!w+DXysZ-E!Hl?NbrJqP8G#LO}%xpnjHEFI;H{|vdT~ud& z9LAUTV+x^C@Ok*c20E9mNgLMKHB{s`a}q<71X4D=Z#ugryJw`hOoVC3y}hL+{ps1+ z99_ake}43e?e;)wk@z)*7r4$p4b$qjLA?O;RdCw(nxZ2V%a#8#F*C(sdZ#*97k7aA z5b(Q5qV5+9Y)DuSjdv?z`4K6gszf3 z<^3p50%4{OPhg|JC|a5M+%w4bKF2%E<)>y>#Q`iNI)(l7HC*F&=J)I8@pj_5+aZka z(W4ag(AE8EzLlxuQ%IveAN!S*#E)KzUR^abJpi2VC2FwjAD7FC()x?P94@fJgHcVNyb+rTz=CIo3HQtc5H 
zXPXy(<6$9m(Z$4ntRwn$6lG6v8|**+NZC;T8(C-Dw(_T>8m+ah4^ek(xkaWKwI}v38a@9S{zItxy$V z9b=}Z00IZLWh^|yN!rjWA9E)YZ618knE@}TpNhV-C~H$D#o`4LMpD|bFet2GOwelj z^9=GaKls!WKrg!kON`VX2IX(iHRifCRZx2Qu^%h=%b4nO0WSEMk1#5miKwT>RK|nZ zhk1C5016HP3P@OJS2lrdrI?Xwya*tZZ)J5Xt)UYhp7zDqkWG1z4AkLOmWCA92=cG^ zdn?pckXtPP6rrwX6Ub;uGhH?*0 zl#dk?ea=K}AOn1x9yj*$PUn~7HqU1g+Q)sFg=VbG_YWCgGLrb#81)CMa{ufe2`^u8 zkc;JJ$nPX`*0zO!e7MB~`4vlz@fNQ%d+QY&zkUYvw z3DcYC^;!N^4q{KSsa+=EJ$B7C2D67RUYK??Ho^AXxn?~$kQ;ixbAtc4wcVs&tj<^B z{iaq1MXHmbU#qSH!fyGe#o}?y!QCRZCc{UlfcOKKt6KYYI|l1dO$yR80Emlq>3au> z8UoO2eWF!N3#+WK>n8}nIQgGE~Yf9fi_)x6%&>Pm>;X}B!$42 z{U77{Z?O?cE-2pq4!G`ptzT-1GC!h!H)@if?Qo`Z+6xDVA?RHa1}f9~W3NG}W5=$A z|H4LaQy0K>F95P&elly^8(@oze*F%26DDQldFu)az%fcoq2QDwSKpVD?8Qs~`1o6Q zeI|Yf$izlIIw*(5jOK5H6#xR5-P5*nb%Ww>u$WnyYAF6=Ja5({^B$=wp``%VM37~< zxpAMPsO;H&5)0LtCT?W|DjgxZUpp=8&g05P*glEZ)xzN4I}tM3hOrl-Xi{LR!lhpk z0VVlf*edBS{q}CmWC)FC!07uJndT7d7cyS>=N6t@vR28m&=LZ+YmbQ=}i!Dd|?rBQjT$(RWF>1V=a3S)|r;KH-J9iWl zUWs>>vkz~U7xA7sC>B`4E)>-mFrzo=b$57sZn|tSsRtdev>9km8c2-|#|n^+hy|J( z+e3G2WmJi!ghg@n4B+BHQ_juC*J{(4-Gn|pyWD$wlWs(1>xJyc_VO{qPfJ=Ql`WU2 zF856)kpuL{t3x_%9i>dxy|ydd#ODK*7Czx=Haa0!D}0|hTpwuu)Ivu-(?;CAw13nU zxYD9Tx<=V@xm}@lvTo~3h8tgW$OKk7{pcDO>FTf}tw~qxqEaui#Akp`j)(ne<7&*| zz)Z4fdGXh&FR-gG;#EO z-l@6QU-SC=6}dN=h*-4e+|2MWu+k`Pz#-j=?su*yLP_s;MPe3{XLOgNk=i_8pH{TN zw0Z9{^Iz8xF+Cn(N`9+AK9DmhiK%G=cE%?2nvW7-CFugT`TY5}CEQ_|htufU?&aOz zRo7!tkf9oOr%kRO*4kDB<(f_kG5Mf(`Ez+EuTe!o&HJ?|FVuukAaAX+DufyU6xI{4 zr&jem3~|*7>P`(7Iq7|J3?r-ArcHv4 z9mjer5<5bR%L-iDZQ+2ESIN%@ku8DVm_ag;2=PeVs22jYyn$ehFpz$_35BzDUkcwf zN=SQrz=GWhoIfOgYc{mxPRsk)`?`4R<;~fBaOC$hS6_%Y@Yq) z_`-=ivzlxB8zTn+zPrubgms$Z{rwHsTTT{{_Xwqtq3jk>6v$)0dg)SF$9B)yXmL$@ znn|EoWi3sfP0~mEk>IGwmkPS#VEF!yPseZ_A%!HBt>?VD-n;H^=^7Y}QwI{J+B$(p zp{ADPvhwWD58f$YN=B$H;{6=jDt_l$*~Dc0;p1a~pt?gzF5hqWd+mJmxs<{c;?9l- zPvHRKSG&#mhzkS@>I; zs<$4T>Pzx|hll6=&sQF$b%zA!Eo%jMQ3&{L_ll2h^EKx8Hu+!P0VtBwA3=ePt#Cl| z>sdTw+wtihQzdBd`sf$6CQ$69Sz9Mjzan6CVQ~NL)km5 zQ;XuLRsn$=aHL?~Ui6c{dnHeOC+y16HU@d?er@o(FC)~^`nBC=<4(^#U*~?Ijquwu z&y@$FI9{DVn>I{LpDS4~B^3<5#O!ugHHL1GF5?R=?XI@+J(Ca=FppNA|2Q6qKo4Xr zC9)mizcG_%09#y`Ndj|z;1|czPpi?K@8h&z0`D$^CMm$&H@Cde>5q&#g%jRTui>=k z_U;^a&m*~z?@a5ptL~s6OZ+3F33DM(C9g`&xDcq1c9JZ*DG=F@QvPejFjF<^PG`dgO)u-obHWImEo$)F7E$Kbc(g;sV)MjzKHL_UU#;GJ_*#ML%We zOJ<}-fZxkA$6n}E@t1=LgyM6?40nzn{m8s`7C=J$JCf%6*4Nu*yH?0Rmm4 zb+lg{uY^HUF^C7TeF@)=gWr~*LiU0nw;Pzi=!)Q3TYU07cXEvyxN*|K@JL?#iAFb3 z%Xxal!wIUc_;#(E!$KwEl8;n}^|jR2gj?O7g0iYryCu_cd%Tuy!btJLvB+`&QSLnc zm?)|gB%1oA`T9G|e*%seRKR(|;Ae00#DsW+!DQvk2vFS?bGKp@A%N9}NMIvZUy@~6 zJpOmzkl5ICxdZLE<2D*^C`Rsv{8P>O;l89vx46-;RGklj=bNv}u3v(1BAC zz}x8(#VI=0f^lKaG);XkeWZJ!Tv=KA`3TVf?TsfsEQ|sflC?U<;K1o%c&y9H_wcdL zb2yBWf4P<@IL4-3pP=9{?#;Z7iJD0=sY)TsbE<7YSu=t+C5vGHW zJu72=R|;sgBg|o81o1%>cy-OQ6MeAE{12)e!uvebTEKJr(gGAwSq@A~YIa4kb*ERlXkmywSH@-Y*z0&sP^u>WV51 zY94Tyy7-pCBJ4&YxG$V#df`lth&1{Pv?f&O^{#aV`eaJpNrPo-9$e`aY4I_7LoW5< zL%cvMXwURb9W7Ff)>e!*qcoIcVduJx0H%h4Y?Cp>n41tnd1$eXA0V?Acd3D}I9eH~ zDpUS+p(*^HLe8EiUy%N3(;(0T@(rCOITci@k*4u6i&jwc5sIpUWDz2^XCe9xu3EET z75Vh6GDXvQcZ_o;AC#Le&n z3QFBQn#N#qaI;Hx6A?pYTeNAK&Z{yFwHymDZQZOdE#4di)o%RIRci4^OS64 zEjrcWCjG_Nk9K@#whA#Q5l%jQg(ItdXCqy?MJ7~I`0%(^b2*x5>g9Ut*q=pzVm!It zMThN_aU@YZee)Eqcxd;DzPR;v+z{Wac++S&WBU=2T0=vF)90QOvI?G2*f!n?Fk?+W zy#M(4Wx_IyqW>CMlDY@S`}YEo8p`)$lrlFe(lbGIf=sd~MVsaLG;2edhfW4gON1xOm=h zS}2IIl*mQ?mx?ha`U~7?Y4%5MLa z=S3BIpyY|P`WUg3E4=7w{%gYI8}a!&V1_-ivB&j~*wO=W9|3vqVRV&vQfw7*ADuC| zh{1LAYzA#PZ>P{5I9cq1Lh}vTtejOko&R@k&9S?UY#DV7`<%s zjNa?}ll`)!=&#sK2WG6}(2|^TxdBY8;oi(=gnUr=WcTpXv&wja2Kbr(al-R!x0xdf zGHDcK93@3NVAZG?0c|tWKt&ct4?O@?*8^I_VNwtIN`VX 
zh%@Wgqvz)nr???tDRKE~RVsQ}s5m&TYX>na_pGJk7F-@$uCkPH*SMUX?aa@Af5Lu0 zjvz!D4jqMX#YhxrKja*@sz$=A=d-Bpr}*`GQG2n*JlDedtA#SeXZKFRdk!0H5x)`bF=fP0@@$%SZx&^Q5&z`ZvfOpOlQWt@)M>LJ9q+-^w zub^wAwwZ;6Vec=Xe+lO3G1n;QA&Ny|(v1?hm~C)H&S&`lq_tGtQz>i%TN+zi!EjXS z!#!KG{Jxdi31VR9g}s!{*eN2LS+8P>63Ae4lbuB2VHN6<@laVeRs{6@^B4@jbDer~ zlg+m{84=@pm@J{c2=WoezS{sDri`4x^HVGjkQBiuDl*%n0lX3=YRKxwQe|k3tG>*O zo#%4!e82Vn9QBN;-3B5TS0F%sGicDsk%dpzeS7~20*PdlV~7xU5J#+NyCMZ0AVe1Q zPcCsT?^%?uoB?DZuALioyk#A(trg>A6LI%}`ikOJ&d#|xvHOubS5Rz$ z7Q>tUwr#jB>|#Cb2p^3f<`YGWRBIK1PBDm`RiA{#9iUhB@*UD@WGP|KM-AZhD4jD^ zLq`@^uGRWx$SRE$%lOP5)Fp1v4nV$plDQ*?Nys*UEv>ztA};-n@ftq?JXzrK;iG}* zD*kSjvUFEfecQ7%O-{;pK!>}pVqG_CNUu@}zR*j3^MVAGUgbjt7>O8fGV` z1jsQjQ*M;I!_pg5$dZvP7xeTndErO;hR}%&gLrHqLNG&upc;{mfAi-J4N=8X@n}ye zU@bnr!%DSw;Ux2g-I0TTWKL^zDkeS%qNgD&5<1OH>B>59SftunkQHslczt<<+KAyF zGZ9#kkeXXLYqxo`llu*Wwca>?j}67u7QVGUx%=W)$2@SFJkIHEWTP|1TQYrM7lZXQ zkt#Wn>T8(zhlD@DEz3(!G2W&$AEc${-o5+Z?)Rx=d{z7I9$DPl@m_*jnnEs+-$z!6 z>c{8@9131p&p!%={|>)q>&j99O@laA2ycr7VL8jJNX}`8+kBZu$uTm%Pr86TZ1uT^_I-}Tg|o>*o4vDxUjR+V;Toj$u9Q5{JWKg+r7TKl==61EyG**DH5{3hTVG-H`!9VYbj7zpEn;t7#Sy|cc zT=8>W9M!mF{488B#*L8)U;Nr%=Zl&5@v5oeCTVN(mx29OkrqVe#fFqQW8lHvp`C!v zIC`B>X+jbhcyTq+rU;p9q&BCUY$vhui-`1I{c(I$P79*j%c|5@&lIl2ENpC)FW3`d zlxpr&ZJy(bqB%;l#z87Q^`=m%+kqrcQ2b>>c5`QE_Mld3xsgRHz|7Wmxr zs`!OMlr&j7SKvd;m)@-_F_6}<3^WJig@gPPCv-zRN!H(WexQ^|3m0pk)7_X-32YI% z@!NYyr=J_`Q;H_<2Tv4*j)v)T;`oMidKrI0Ea;haZ6R`DBLHOoVyi9-PYb%ugCHZ5 zk6@2Z=hq(g!u)9s&G{nLZef?@PUKt1UMku&J7ta=(coGbE{kNT>u;S#$~}dbyD2F( z2(aDk`AEHI*8T$R`POM7sA7sMjnH96@jM>#n|&aR2=y9Wo+s{%$1ZM zmL^{QX)3M!%J#=I|0f+47rGSylq{@ys{hvuFi{{QVrp8j*F`@jssDg&$jRa(B|Uw6 z-vYXfDwWCZ_pdm*RCk0=ep-}y>mvc!0e0!&$eHoP8*@I^2n85r63~$M6Q1)yR;?_A zhRfRoJJUn;y-)o1JUfGlhnc(8(1Ra*_3aMus1V;4qYlI|?_?KQCShUAY}$$DXh%5r z7epx_5OMG=ww>?k;gwEFVzvVcMODEY-(-RJfoS;`W;#+=#hd5o#iKiKTlfv-v$SFI zs6rH2x+oYPQfr;ee}!(}dmeRw!%pCKI*Pi`G7U9l@*bt};maN>Xa2(N2x+$+Enk1r z!}ZPt*kS*;1qqOce*ZAs95GD?E<4=^`U|h+MK3X^`MzS`ayibi{Kg3?x>Mh4>QMdD zkMh$NQ?AIDtTJL-W8&LqyTNz^!FEk@BpO?$~)qh5`fnaOyl+W12gT#>aPWQ zw+VucudJ`%;CpqkfiCc1&~+3MYZ8!zpf#1;ffwVhzj9Gw;R)SUEJ}75WSola0%6c- zlCn;G`^1u077QAz6wx^n7ckI1<;_#6OnC-~|E8s@kJe3RY zedE&1if4?4!0S)OMK(OM-t$*L8~XF)p91t>!Cv_wT2LiGXQBLB9^V&u24Q_~`ApDqZ2%7q~J z4j_ix$jh6$=XU5t@5cdVbwd{GLn5MR5{M0>OHl&JDY-y( zI4xm^N){QdCnrZUC)rCCx1%(C?M!pH1VTSH*FsgU);zXXMlYwex@_*^c3?5{6|oeq z|0Ks=or40_)0;_9J4R(;Ej=u$b{HwhdHPXu0if^xZr+6V<5hjeoxw*ojlLscld=($$z;l_Z-95^HhKTC`EswL+Ct@xEO$Hi-AVq|r_APT#l2VkY)*WbdV znE``)59oY%){M|0=`NQyp)YGn?w{gvNm)$oP_crJdOoTMikZrt^OrZbQdf^MY3!6- zR5}-}Nk>uP>>lutR?{l=w}CC@V1ENV#PX-PxlBI71<`b|A^S@_$LHknc zNt7*`XB&7xK2AW_`pZzXDJM>fU0!i9^3E`V&)8e6ab~5ozG%|b>9p~Yt) zx+y5fA{m8~g07u`T>k$W4};4B9Klt5?gMQp9(>LJZUt9DAfVX+7jLrz^s9n)jo+Bd z*&d|erhk{%r2jwm-a4wvF4`B~A}ET5q9Cn+Gzij2sH8}PfOJTAH&}qupnxDC-QBV2 z5Gm;f0cj~Wxru$})^on^zjMdEf8Be>dB*z=aI>HFtY@xSYtG-A6M;~^QBUY-sTnLy zxl6b)3Lis!TNI7y*E63R;o#mTTRz5Af7K0puUY^3jjoWF?x>;s;^vscY2cqHlU<)5 z93mwh-!%~ea5vL)cKDu7-AntK6{*e*3 z8*fUed@l7SHS?A3GFHxbllj!RavX2QQF*_9=Gr)w3in8T$7M#e|&GE&&kBG^1~o`>%jg+y@# zT8%x~KHOpmwHSRzV~8zW!N*iZDJq+(8Q5}QaY+9DSS>TPWx$}6Zl z!0A|7!Mf)rBBvxi`T2@Mr|>+Lsr-_v+t^^u`Fd|c$pjC%vioM0YlPiGoLke+b^Mz( z6JKKn7Noi<_AI*cF7>-QF8Ht=}t;9zz=Q19lVQ z<8|UxnQ+KEbkc@GUmp5Ab~3O9N3|(Xdl#TCLN(EPv+oq;!J0N4pjSkPY88jgt^KIu z-J13AH#lt+it(p(mpVOo_dRa3?~>#kr~R*gClp@2G~T1upfoskIbCQH-ZCCMf!YzT&T%0aC6Yt z$Jm-I=pMcITU*8)OP|Kf;$J3iCWH)V7fA?YK)Xi_-Ads2+|#Fz0jpkpvb3$?K?bqL zVq&w=k2xbmL_|TOc6(e2#wMY5(#rMc?&t<`t$(~@)oc6K^pEX(6T4cZKgFXPPZx7( z@kh-3YlUF!GFhb+)Oio>~Rp_JrSw|u}_nJ$rzoT7acfn zvfUC&+)_u;OqeV!et1oaEBs?#Uy_1Upqro{jhI-ogK91rL-cnue)qSnFU&?mj?EH} 
zi|k3&Y9Xgk9NGRkq^B;X0^XO4-{|Gbq8p}7C^TD3?PcsVW&_=DrQwLiVs9cld(pnm zyw0Te{{Cnr7p;$#H+v@E8dN!$_YMjs<4wk%v=_Ppphgp^2@di*u@A9l`#(Q0Nv^1f zj)&KikqJCLa>~V$thIf_zqk+1QY`HL&=viPo=@UsH2yx9+ZoMH6@Graw6yeV zxICBhWjbo?)T*#&E5(`|lZ*5i&RoDw?V!q>fox%E$?){}@XyjF_r^xk-;DTPDg!$?-2#&_&Tqj5x$G(O{0J7?psmJtzP>BRLJrVR%< z@0_QFsz>(Qop5UJQY_OWu9)8O3ES+Gx5tMRE|ZONNtxL1C3ghP;uH3LJ3!m==+t@g zL!k-8Pbj@$7jH{o5w4(eWi#h0Iw=9b+;GEv^4t87a1y3GP4(AfhVFG=$ zZ~U&o>Fl7IR3$|1wXT()L4Y&dpz?yG;-swxN{c)}SNLO}6L9;vsFJ#RKzjPUs;`3+ zHmAhH$9gm#pY95vmt0<(hZg!Wi3iSp`}XJ1W7g6SUCE$tJ&GN@3uAF zht{_xrb2A3&iW>6qUNe+Tq`027@l8}3q`6>EiA{Ijhuhxe~8Nb z@I@m)L6tUmeXxJrlx1R!>5lC^hxl!r+qw()n6C}yeT!NbyKks*<>{xeI~E3oW0 z*!8CH1=Z(D)bi4gCKagp1lTaUgtF7Iinc~t6Z>3itM6r*w+c$(oVy<`P(y2LkcW)&8OB~vptg?}nA&zR8492_AI%W&M=bh5$x9w&FJBD*1n zybv-uxV8M-|FoEIz%BQ9M6&DJy_rub+KKED1041HkC>L$)~P-gU?QEwCp8_G%C1@2 z{z~uBCQ(=(4*Eh?tnyqbtRKyi)M2)JYNT+wmJ&g`pLMB=Vut# zS(clA(`2WgyuA-uQAbeL@x4f?C>WbFX|9Sm5a=?C)vPw}`lh7gGFFikX{rAJo==r_O@Zty@ z2XuIW&5uhDlGjBh)BV!fV1Q52afS&KB(wc)(*;LQ>ymzc>wdd5?e!Dy0XfJ0*SP9a zY^T2}fYi3t97!tX-zdz$s|Gl%5 z@sg4RU$MPY!hReDD*3#9v=K2G$c5@AqoQUfJ;j?Jze*qKn)f$Mq?p4(z#BQ3_dX`K z<}=iJv+3_dn8KaAWPUQ8_BMqRNawiRaHWiEBk<@2W;Z;5QL6v9RA_m~*oyR!$J4?7 zO5VB2x;o!%>+`(u{>k}uyS#S2>uY3#`SIe$6F{?U_m*xlBK1~dVYC{`;xDheq#gOf zIX)&PCeHf$oi;NEbG+Y6$Dbn?^SL2f>W*H?V7!QZfj>KYSF3wC*sxm*@($vk&`SZU1b8wrogKkN|=*4 zud{r>mkb(l_Qo)xJl4kV0b7@O+c@u6z!!#w=YshwGD;O#gFU)=&}baH-gvmRXH$fA z|9SMJrgrwB%wtn(A1*uGQ;zvDY6EM z%1vaqzKLSR@HmQ=x&x}HkuWy6)9&3sZb*rX9jjqe~U%&56^4VCwz(JkX&pz zGJMO~*td=A#PG8l-^lm%Yxz^Q8$gZtBIm3t2CYX|NS2 zRxK+r?QxP*%JMG@ZVZ=ix?&@%`E;GDd>1e5GaM08)zr+{-nRP`6{U0yQF(TlZK1KV zv-2iCF4z70)Y9l}&G_-1G1l>&qkVyftg2uvaTo(}U-&)INtgB}|v-=#F zQTpT3I}o5jknJ9nx5F<9c;p94souMp8+lcW0XJu^uDL#dlB2l@7uMYiLAz4L#l|AF zq<1a|P8cO5k;j9%Oy8XD?K#0zu$D8*!9PD?;L-rKdY8W9~AXCyjtvK>kaRpg~O8$_ZP~Z!s80>yReSD*J4u>lj1y`;D|q}nFk8} z>?g~(S-IzcI{NV8ViRG*;s?r@sqoB`F8R3--%+1R*0gX!f3ltyFCEb5{C|>@@(mi- zOV3f|&D4N z)nz>+8t4`gaosuhjGlEbS?rhLX2C?YqtD*EdskN{@#@Pr&8m?Jk{e&zFUZQR)kWBd z)q3x%=_ou=j!qrJlS6tCQ+^Ym7!z>R1Ki&TZ|1s3tmDS(nMs3+lV|mm(}%UU`%?{1 z2%pHz>o~0T>1Da+Ep4r_Jkj8POj< z5oMx0JWAg{ul9$k?1Rb>MQ0X;sp!jvFxEwtun)QP#;o0XR$ zAHQ_6_&OWd6Vx{A=Cl%?q;>34$)ArzQWaGD8?36{)*)DXyHB!BTYYZLE(Pn%Olc-Z zsj)FRT1X&I-c*e6bcAQ5icTZxIM%+<3hLDEwP;EWBZufPnm#GzAu#*17ej(9ey%It z6bj4nR0{zrv{UdKAk_l3zup4sl?=aap52NUaMRUIyd5Fc@7_M;ZnWuLj?ifoM%zk= zc;`fh(-b08Btuu@U8cinhn^E>-uDp_KhwXvzh)%5S$ov@d!}Ig#m!rsNc>7=oL9cF zaVC;SCvjPfQz8df#LpBO90p=WkI^mfZ=k$Sv|Wm~)6&wi^YeM?6Jx*psHzvtkE?eJ z6|BMW+L;aNIhnEpqXm~}Th*Ez@7K~&6{sRbsEN1{k^4}B56k7KA__rgun*Xavq_sd zi4cl{!Z&=5`}_q^_9gizJDe`2c&;`FYVNxjFX6l7S^4?HbzVmdSFpEG(@d5rkeDCHwT5i4_FXEsCgc|T4KGj%QwhjALrMdlUM&8clMVsk?&(U#QL~P1*}?MmepEv*dwp8su1yqCRrexN!6&w0*pfLpimy>U_>pLPe&82!R(GYHxJR6&CHB)Nx`GkriV4|Q*bxx7 z*YiJ3iyD5Hm6m>gIEq!iSj|;cZzA}!6%8+Ya*!v7+}>vwdrJcb&wZn6319N>jpc>T zWn6M^sJEMc`h`~hpFt?8|T9Tes?s?E=bV*3C~ zx|5@QW_o)6yMLF;^5wd@g{`x5ITJH;cOUI$Eh;2+EZ&+8Y4<{8O-NoLLNr;=Xud6Y zyuyZR!0g|&%bd%2CKv2iKAR*cf)GGqlCF4b{HF$V7bR%3l9L~U?*w&q7H!UAzur(j zoWDl^XUacKtPexo5ho}KR{5xdsv0xsZ1K?(Sormu5$>(X=>SM2f$D+kO-2Hj!_KAD zf9{Lr9y}uz{WmlZrW*qUOTm7<FTND+{|Z;n%%=igi^ad}v#X+n0&J$T zplTD;e*FCCJr`M&*3rsKZTLUWWzs5yWh`R9QUm*r&en+C6GAiR?V&rlIYiA}(0 z;5=xS(rwOQ_k_7IPBPY~i$cMdF+QZQJ@CV+e|GTDuv7}z5G29PALgmo_*cEe(Me;B z&7baH<#fN^@O^>()DX3HgI=!dXxCz0i6Oc(&Fg5-Y^KiZ&(X;um0{lX=FzPJZk}Tc z?5jvC^=w-8rT1F*`PDdlM0sdprHOS#DiTX+<)jP2ut%x5*T&7wPw2A$*%UY&cras&cZIvhg04n<#_Yv`oC>vdm=ioSXC2Hccjw~Ygv_BlEI!@&%Z6xl?i3)@EFIic+RL{#~yw)oE`bi+J7cXAK7ytVJ^|xit z?7rMApA!;>fBw|#Z)n+qEpj?nF^zUm)&o2|JSM0ES)}^+yZxrDbO&NkOX!&%Cnsle 
zU!S6^ygc(Yd{blNduF!9>e%r?{Q&ts#mDDOPHI_=m%bOp#op^nCng#Jx0b^nyIbX$ zL0l4pLqpeK`ADg#Mh*^K;hf7W(Jn)_Xl7%kpw0!Q=g*^{z?eoB)!*9%-})N8g32`! zA*+RcjDKgN_p&DN`rqFWH8j+J#Su~7PDFR{y_`7PWs?q~Qzq-6RZ=>hJ0bf-+dVc-ZIqitFKa+_iG_ ze99jp&wAw6PA@Wi6iFo$k)DxJ_1E?wSa2X||EbzRWo4}R_tN^G;H;YcbG&~i?aQV2 z;qu?#3Wad~y{?`IF74k>8usv^4nADI>x5x{e?d(Af0U3E{j22=mZ84?-eHvT?~8*M z_um8k2e@Z7U*kWq0Equ4{D1QlR?PlOxvPl!E2UDu=2`>6A`sy;WQTz^ge^}@nYdjPS=>ICb`x(8StIFsK&2j~N#BC^}{8tJ5m(%+%SRPsS5rMeX zwz+TN==kw5LMAmcQ;ID*@GgfX!a{7EbfMqaz90YUp)v zw4OuU`U&xVqBH8}cx|Rr!F=gyK#U7jlD|h@5uaKXeLU~(^T+TaylK=;&_*mu z?*%2_9j-CrhTVIW#E8{zg}-AA{(3(DPbe<+@V54L@Y%${)+t{?__BI$3LX6}$uQCa!MoeK!+6fnV(p7K%8o&VO0G) zw=5fV>lzHe38tlRu1YvAkA~Z)<-9n;*UHGp4|@|CETu354x(XG_@8mX-s(H!^-p0Q zd<9;pKl&#%MqTGy%gcE_oJAB4Z3@HWq^MxkZ=)U+)z>G&Sb8}#UTtd%rG$orc!ToNRE}#u!VI2$dHvo+_TvOTd@n z6m@Z_sIIAD#Ql13r{mkVfYsI2kMZ%cuY6TEK&<}?ZiCg|d4kx6*(GO81cijas!9(G z3=o}HU&z-%J|zs>E-qQzw~2XHamIHCd;liZuzDS`lfNSH;Z;!iyu6RgqXN|Pg&b-h zE8ToeHQV}2s?{+u!LdT z_|gc3^J5?+!4pxOhFf#*u-s59At3=9R_b}|QXhjmQ5d`&hNTun#jvyw5`!C1&3E$< zmT80WG$SS@gM&Pou(?nL4qa@@V~bu3DeHt-D8yl&Ml@~4ORaE2{tVWFr;xck%=Xxq7lEQSNoF+ek;^jC_i0#+6Ezn3>q0#!Kv(N<+~Y%r-8HiRZBm= z^x6p&FHFZWL$KD_tBzrAe4y1-(#z@{gn*ZJUs#;v?LQb3s=2X_8+NE($z9~+^M{_m zeX4xU0O>X-Ha<=m8ykBvRql;sUBmpoA-_CEpYUyTjhvE9c{+;qxYzWP9w^ueJ1-th zr)eAq4+R?9q6w+%x-(xPv~BKon7naRVzX@f#Q$+*pk{QdkC#|o-J_vFmt}}6W#$>0 zoqpov;QUW4RS{eS+5<3ZSV58^`0u6__xxf~h- zX_7q1L@Z3NMtH0pTf|1k(QHmfycOBkrRu99IJ?MCK~@aW`cPh8L`$PoM9aoht%h!X z?w1kN!0ew-%mSl78sZ*2p%C$UA*tj3{&6lRE-k{clkLuX7aQsREOJ<=TK@^|hXVN! z$s8KQP@Da@Ke7!pUdS_3hM@J>W?c2);Yr5Q)CNQ+zbH#U-1FC=Qsh0G?#$g!GTHnJ z369x6;ujfNpSfMUiAW{8Bkk#?S#GpLbQO_h2>z?44A)efTyuyJK>AJ!pNCv7c~YRM zTa^k45pjx=yw;rB$uY%yZ49AlKc`ofx?lZ~@0sZ9jUz=w=v7}v(6-@UN0bxW(7vBK z6=$6(lZIe^8D{IZQ$?-4UEhG}Lpr=ghU>bzdeXO)xO)a%kJf&Et2}gtJeZ&4Of>DM zPeinyNRCzpb{E6`KKizg2|vHdF4XXJAi33QoU50Hh9P5eZkqBFEyS$ITnHx|h>!p1 zQmMlIkkI4jHR@DW3fm#geNDq_GQlsP8F)NN4oM=erEjrtqq`3$17=t8upsmsawrWV zUf)lDpS~^NHaF3d!G`nT^Qt?VLmsD=;-QOY@TEzlBd(0#oRRh3dPTd^`10_$f1VXmQTW=<%yf7BW$6@O?^ZBbNj(|d;={lk=MdgETf}iP zFM=uYYj4}x2qv@7)wR))5!V=t7^P+_-9Z^j~c&W0eXRY zJIi_GFZBw!n=fD5%@9Z+XOMO@b0_xMH4~o4)7^2quHxb?RgaaG-ScV$=ca|9E9%#@ zzPccVpl%KOCDlzehHg%FvB@%D736ZmMiDo2W1Z>888h-yL^k9&InH!O$I{Zx;2C3P z%PUiqK^g>(kQqukh9GaWvur%4Yc%60pNQ{1PB=`!W?dlF#}PU*=#rU~ue4is(@u8; zBJj>MQx7k65{we@h0wzgd_MnOMG=a?NsmripM z*!T!V?v)|8(wpenKglW?aSrEGSThcUB{=+!b|8~bYQgrmI;`?f8Rl`zi4*=4dHmaW5z$6H{c;W^SZ|* z57~GiTpa?;;)Sf@-zW;L-HBilqC_g_Qr$54vbr%G9t~AV=M21R%AbdNuUn8(k_Eb* z1n%yl?O&$ptgrU#^72a7o)20&t{~QZHh}~O03pm`Oe8P;3i88Y7<2&SGYQ7iM~=6g zJW}7L;<5I}GGViT*X~TcsCSWu$UHw!m$mMf&F@7)_im3sFnpC?xhB)%lB`_ZY{2fr z{@ffw%aOr~6ILJ&Bjs14ZEC}#`YsMTypWV8 zNihmSog54@qD*d5hRQj)=wdT@KU9B`YR$`|xJWTlSsCr_;Lwu!u~RJ{%OoYUy(4w$ zYfx2R*BCsPpUnnbVBp4*_H?NDo*Y#H+oemmQXA$2dZHU(dgw{>o1N&@>@qb1TE`aC2*fe#9^|tq}$~apMxTt5}v7bCit)fcP;YIJhZ!; zZmjFIqxebqv=Vd>dShh(EvFrSc#t(Q+YXWVnDVDe(Y+>qHzj-rodn;;_8w9)u?%9R z^_vX)XBzvtU>45T%WcZ^-lTtio(**pWs$fkRuPG30B*SupDW5A)jR z5p~m(x#tNk?x6cp2$ijaJRUscF`o9h7KRZ0PIb${-d^R~H8FryWOG{#x14+iDT9?+ z02H^5)05X`6b{~cNN&S?JHi|1CS#)td=wxrc~MqQCIZsfFN(vN`Wjymws8fitb*48 zPDtPjlU9K;x@m6L{iyGduKJN@#!sPfA@l}C2+8_nS&7sV+ocwkn28^QR^C&P>#i}D z(hO9qJADLBL(Z-T@eMzP;gOO({6e(PChRaq7hMtvxl&i(Bw`-Ud4LuY==-rHt=>0k z>Kqo_vm1E%vP1qn;kJjh`BR{F6s_vVPp$FOf3OPB()344@52<3IZ!Ph$V4r{2)TOI zF7XZL1zv#k`}2>VvT7=L?}@F}3E?e(S(ra}MMBCz%il>hq!pL;{cEO_d-5XaOm7ap zOn;UXJM1bT<9OGAmmF6mMLtaE%)3*vFa*S~MSee=rXUOCo6*W`>@0!aFE>I+_E*M# zeho8EU5tyii-GrIA&@%l+9RR?MG90^ewo4Cgw?R09t9X{pDP zk?q;Ub&G8~YAVDaJy~)`_DhzUTNS^IK=8$eKXIQw_wDR( 
zy^{(jd`zOO3jAT|EKF6988C51u5CG&jN+#lsBO^e4hSzAf=Wsw71S%W^W?sH4TsAo{I zE_$w1*7x0!sP(1oJK;|6r+q;ZEb!WuEBjQId3aiOA^$A4WmAKRz=Uzr+QIDuZ)$ap(1*Zb@NpPS!*j1T& zaSdE;h3MW4-Ah+iYYLFY&V}_z-z%j~rL9`ZzJE5VBu8*wy`|w&TES;`-0^jQwuUJo zKCIV%)L{OwSKq?#vRGAO%6;_%3@}zA!Oc-!*1f z*Y+{ewA+5&8~y#Hd!m?bfs}RUw8!Mvbu2B?uD)lsqb$duO~*sc;#b2n2Rw513jioJ z@BCw+YFG*ss=3)D8Sh8+p1z9-{N*FvF5Ty70}1qbIskv7dXhohWrzjZ2zLCcu~*p* z(Dbh>rm;$j+|{XY+G(FkqhHutx*mo&o_@n6_(^|_xZczo6JK487o1#ad?EexZmVyr z0EH2M{OZQ@ie3{obk*SWL%YX5+g%Tg_4HWy%-^1YSKIYDS#%)qrUTqy}ce3car7`6?*b0a!hwL_`|i%6vBvv+8%#Xwg5PyH_MYVCcppab-IZa0sl0q~Ds=+tGx<^qsb z9i<~_%daLsHs`taLcpnEQ>d#rL@dmhm`D5D@+HG{d}(H9tAJ%~=SyuLuNa<005nE^ z&5a7=cS2ix_0Qp;*HNfq0T|QEp}{Ld`D_k>nmO&v%tKQXIq;!20qzdyk1DCGZIojI zyT*FzSedR8#XEa;nn$ogdR0NkonWTkxcq*ST=r(AUO1v3|AK{RL`XOpcz%nSud5_v zjV`V+d}wjHElVo(EZwf+RtAkGz-AUd-qF=<&^B_f8r67X5Tr<8=;(lO^BPg z&Ca6|dpYxN?*b#M$`kFh*BD2!8s}TxgH$3P=nX_fc*2}vM!x87QY4P(WsLr217ma<#JXh)8Y=>U1FYIuS%V|=m28S ztF;Lp;N)A)s{C(Ytm!GNeo*rw*^%USRL z@jnZgvnv#%cg&pMHh>d+0i%imq=_??Yu|BQQojA8C2)w>{$Wr0PbVlG+Wm6*DBd|Rsj`}-DRQ;mWhT_)gMQA8|Mno zT|hJu%J2c6VM9HR)SP+9&0DlNWghm+ea(qJr+Yp3HSDdwJzpL!g%|;iKoRWF9Gs&n zHqGQ#jzLG63odF8Qy4@k4arrQ^EjIR84h{@ae!A9lB#6`c|Y=Q!+tpvRjUie6^6lL z7(3Sb`wD(8SJrySEKG*c>0-Wu<}%IF+)du}0>l-BAWQ+@6Xs&`{$bj?@4elht=91B zZz~OwGstU7%`&nMCY===M1QA)f&YeW0D}tZAN*SyP#)xTk)~>Q5N~NdU$f0UiWK=g zVu97qpp$7N{K2ys2vkhr%hkkqBdavv!|ssEvL~ll&)3DK;LF;`+D;$99l&ojw|JR$ zPvk(n=ihGCtx68+fcMZIbBv7$j#GbOBZvuzqn<;~Vs$Pzx_- zHB!R?^$Yuq4hZ%E78^jX_YJ(F|6wgvWxXW(=SwM2*-2Tm!(BoJ$bA!KRdX7-rEl!H3|WVZnyPV0V&@C9rZ-t1>ZJSFQI>X4SfNEh=3p5)9Vv?&4y==75(lO&7ZL z=hvzU*_~e4Upxl=f{mAO&;$k9u1q*N1yRCI+Ba<$Lv^Rj>71GtpTRnVi#Ns$&{Wx| zV_(mcR{`(DP!0~i7+DpDxbohRj<55cqb`R)^Txx;+&p&8+6C7YGl9#myT zHPLYgVVToT4yx(*;7!11!wwp%z)or21>k$v3pNQsVA%zX|I*-apNZeCzILgy(~ecs zsl^f468v0tZP^Ctm^;VOwjGft{$S)NDa|esfLA#%8x8ukBUZ(0(ch~;qvoo#?L@z; zbU_&d{jj{^zW1I!V#62f;LE|z?Hq2~UU9@ud^JR$6utuO0!E#ON#2K5ybJ`fXd=(j zBX2IUp8CyGO({RAcwM3CR#MkT;vDcYTcQujRfJ_oetTZ6os>0+pEh1e~*G_B&WWnc&f* z8t4x4F6wH+@zmKUUh)gLIOcUH?CY3z6OkOuaVxu$daip?7UP2T44auV(|~7%e%p6d z4-X+~X@MeN){nOzK@>f?IUE2FZpi7I59}fN;b`y+KWvn-6V33JrtmH{>OS|529jpq zdyIwf5Tc84T;VRPL01-2bb%I!%g7;!rXsl*7~8NuEWXM4F&|C~mJ30A^;!(~t*3J{ zqU|hctN15` z6!cAm)D91ix0)B(ctq1<6Aouk9>TNu_cNY$4mXS5#ib>Hu30;IHeQfXlv&mreAuTV z?oR{#l>jdJmPXVM+B&KMdIXz?ILG;v`kK1Jfh3%~Sb-!3wIS^7{u5aF?LPEpvaX1G zOg-PlON~AEu~Q8|kZ-tH7@XkWe`7OMtAqgA`~B7y@>A!OEfKk^R|6=PB(bzDM^9k~ z#-@=(+dM^S$svoh?m`6x2ve^@`YY(bw-Rc>LJQf@IRwHIRTBzh^auLlc5wAaArRCE zX|YEt2&!vm5IbKe`4t>JA+p~3^2uCV%-^S}1Kyn(2 zfrEU-SE*bRVPO%79CF|wy)ytX6Gucna(3pyVy8M^6Vf6I5uZMNf;6bOe=g@JPA%g;!9l*^2w7>ipHP?{6z14h`ec8zZ zx?0Bk!43ohgiY6Aum5w_bovDrdyYVK;JtZt`RY|>s6`8>_gqph!vV;%XU|5!u`EvO z?-vW`_bqny|6#qllG4*9C_^RV5*HXMtY1%oIf}@jo~p$KH8{} zZ`4zG^5lt(`$HOE4NIYD^Z}gl!cvhFduzk-;NW0pS(z%RNOM!uuheGn-Swo|E7o%dk}G-Oq$3 zh0;o1UPx%`^+Hqg=B0BBjsi~0Kkk$$lV3VFG&-88ci+@G>PB7-A^dZE+}v4(E(_YO z1Nz@yAfBVw#=>G?YpW2N3f{c6#q)zhlnK4F2Mw(k%a#>KG2p8we8|0$&ku!b0nkB}vE!)X6p&ir8+v7Dc^D>;e z7PY()NE2nVtNpZ>uvC}%4JT9erDvfD%NF!%ZH_QF-PzfB3mq$OI|Bto1Ra+Zx;Qjn zyttd7tfFG%4XDcI8#iun6oeBw^O4Wys1^93P$x1hRsQ$!Q+Ltx$Jum^?q`HBfEVRm$Eu!ux=O6bi^fxO=MHP_+&d8Ja` zzHRz@_vWVQ=Slti{B8|WrUEDf-p`lL#E}7J*nGvGCi#H{?lp<_qB__a50b?@LH|0t zhx6OZhR|IGu(a*-?_B{G5;}%$Y&TtI5j}P%J|+f#X0NKMaVp!VeE(G9jnNi`j zT{Sf|$w~hblU}yfgsYb?i{tVQJCj)jUeOQg@o~&F#J$qD?_prjlqnj~@v~t6;9~yLcP40qMN=?xTVO#V zQ=cM9-?2933Bg)#Fm=bL%n7G(N%nAuK3IFYoJj z%X*9gnT5k_W82A+cp8aR>^q#}78GS1GxKDRTpFL}-?OmLpWWkzzn5MdYn>z*6 zR_7piL2AMLd%R2rnp<*MI7odMeL6DAkq(lwF&6`>YH@zJj4259Yhghgx`=qOt#Jg8 zh}kSySRRof69S5?p)IS-&vsTYH#?WUqI@sTnLPl8 
z(`nTh-Pxa3UuvzkmVyO_R-^{`^wYbr04uOW4*Bm5tQ?B*RD#{xG_T&%@Brqo`Z`Z$ z(WO39|6DFZ85nSNWkti*qtKP5|02ADp0ozBLNM_&l3CH(9-CUZ%%X#rwT5&TFXS#s zm(msVWd-Y)a_i9Z>C^}a3WDrHciL!M;#(FB`Ya)&m(F$D-kavucyrBSVzHWXeo2CC zVtjnjGVu;YO|k1kN@$Q{rl%)b}`4veqp{0rGL-HAXzK9`Vv~W*_korT3zvx!=DBo0olp7B|W+3`7ns-l; z@0!rvmRZ}>!S@xE4>ZnClFqe|Ju>hL<}NJMk!R8zB&G1MP>;wJe%}^%xj~`$C z{LaM-c{~SQVW%5y`cb|(oQlBAhN-<`<9Uh7?%99Bsd_z(mO%8whYxdpXWt8ScL;1H zDSRWpY(V~?k0EH8M?yYLnx~9TM^Z?c6S`t*bZN)T{H@+-_FmtviY>VQ=vkW>SG z(?&@zYWH0QRFXKqZzJs*8uC4 zq%h3ooxkB#E4JCL0$PTZ6ST}e*i`O0fuI#m2?hhL=EviPL5~~n77lO#0e#c#e~%Uw zrf%hH*BE&7aB&eu5y=g*GBPt0$XrYm^5B`<1`8Hj;7~I)3ueO>U|XXrj(&9SpyzTE zuEa6 z#)hhbmj5yjeT(g=LWL7{mxg)N3m#Mp_l+0|C2rG;h|J__=;-LED;Tb}$;o$1^4jb`z7Fd+bxdA*O}b;La9HkA z*Bk%^e57@5G34RRNcCfT>4iEw9toR6bd@{{+=A(gvJ zZM$$I9Wp`8X?Gbf}P!&|)v~__R@bVoXc`zc&!2sk$daNWyCl?}5}*?1axY z*1y(M1+n?w+pD`%eS5|Dw_`*;t3z}|@6y2D`ow}Q-(3EBv2i!4?6YSqm2Kc+CMlTq zm4UdKP3W8OkH`HMxSp3&{9_NA&bd8I2q<&8F9J9&xWZzo0ry?c& zpsrO4nh3b`&!MMdtr|mOFMYvCPI-Cx%iG$KeLmPEBV5#|p}$`4(DC$&qr|feesGq$ zc7hgS`(M9%AQ5~`kM%ze9E`s0U4mxKaUf*yA9O2&!z`)*Uv(ifZjk#o7Ho>9#Q*Ij zeW(5x&N66%uQ)j6GLr|%c2jJNWY>p8csUaHNd|$SWpFMhCh^_lJBKIUAGqZIk;<}t z36%wJ2YUywt)R!iJ0?1pvnuo(bw~v4N(l|-M!fc?G87oZ!WYe0>9DV=$7^L}6@g8s zqAc{dq4c2|*yxsT@Euw>$|h}JGeu;Ejiyb5_c(n6o)#o9yiD=8l0Vuy7FSNxloPX|^~ z#l*xFQ)AO%>}<6jr>SX@TCUUlHMyQK&80OYd=4>hF15N%&7*#<;1?{nh7S{CHK#)1 zM&pt(Q|q3kk}|9EszIwxlxMNL^2zOf!lIsH&q)Ml?lm^0Fod zLv-e^Jypvf*3&0pB2+{jYQ5A*@8_bxyZCJJHIJ-v*~Gd^)XVRvhje$%i`0X@iz&hk zg@lFSy9rBK3?Ed8#F;vaH{7q`!Veu}wQ#$gN^JV7Eng-}=f`=zr(*D`1^G6@iTn8( zX4zUJ>*E}Wy)|_!U&EK&wBsD(LqkK6=&n}V2->NW=B3Y$6a8GOFq2*sjDp;@)gGzV z8|f|W9xJpm{<%KG}heDy!IZk0{Cq+H(8t`zMNe zt(PKJswjJUZ6{;XS8$lhPnW5?wI3mhsZ(`BF?`cP~#A z5JxNTe)j33ItQJwV=i4=*`t*<(C)f2p&xa71NT1h66(;iRH2-;X*Zv0;$RK0n%|@# zzF@X&|HsLN*Ls7*LiJ{`Asf8yN)KGrV|x2fDn30pGVD2&L8Z*>&K@s+9Y{M}_Kh2( zLd92=DvOth&{T?>P)hH*@*!SN$EWjMDCvk?Jp7lN@aL;7$4E-duDcMakG9J-+>|3= zcBi_X(HPV&rIU82cl~Ap_wP=FqHFDT?)4}y-3Z$7ZsP~;3H&2Y+M$2k_(C2CteCEP zxte)f9*V72Zy-IIPjjE=^l=Q9yQtS&EPlCkWV+XzJ7M>wV}Dcdjx;m8rf*L6S~bDl zQJh=uv)1=|SUx-+FTdG~x<`EC-k6za#dck&w}kCrynOxE`$Y%T!{~A=I_ZcG^c-K* zy7Ppl(JGp}y=br7vUjU`?AAn+HhXP1o+pW4q_kj8_Uf(i@|FVSd7E=~U9{y++JWzKM{@qvx00rhlEF#}+-2{**a~p4PG-1>(9ToSw0sPL!`BT^2`U4UGvH#-{6| z7nfF6%yv8|Jf$OA#uNEMBv1ORO3Fr_V^n;9ou1aM&1+sizWz6quIf#!EL0n&&!crZGn^Ka;f27O_p241rrGt=j1KtliQ6de@<=g zD-pdLbebZtj_KlDl*6B>j6oTag^qj?Mok~OU!^c|?UZJ9&k$2pMQV$r&k81;rMD5f zSj{3cwz;pZ&0{<<)hj+-v2m0 zQOA${3dz*ra5O2G91_B@oJc4_i(womqYfhsxz6R}c8Xe zxs+$7i4KXW!5F^M1dcug~YZ&v&0+@;tto!RWllk*NMcS*o%W z(OjsQb8ye&05Mr!e%n?RZ&}0B*?~o!@Ph*}8ZDGtuZ6ptXWCgv3(aSvmjuQchpilM zjr;9E2VhB3d}#N$AxU>PuHT^o=iXP7r7C#@k+u%DM2cpc)8u}oeTF9Q%9}1S5)!;$ zNt|$9pm+zkm6mc7X6SuQo zMoF5)b+h{_tP|LT5-Rp%ccw~6XeeVK)3d`N*2zh}>uTF(7Iy;l3q(znSON%k$;2Lk z7BT$w`DJUihaSRxh}Y^?_4o>Q&n|3&2Fn#@^>af*L+dKrDRK1O+k&NfDmI#CeL7yW zZL(uaJT&2&be6o;&lpt3H`UoL^4=mSDH4y@#h}{eo@^2~dnvG{PKNDLq7(kXT@0$B zF_!)c>h6C0qedh*5PHJm(VL>m(z@|WGS-Mrg})lx+d=+CpTaB&sqj)HvL~b|t413E zmwm{NY^4^&j!5H`QwOh@Wy7rKoa=9sH4a=D;f=plCd$ozhOo+_`Zvcu@lKk0;#zCp zoCo?4yP1R!Rx=f;fd3nAxUFRtI}!jlzz~oOb@D=qVZm(?n^R4c_O^zAqEi_qmX5#k~B$KF~|rRJ&dhI=TWe{wH6m^h5=f+bFCfTr?0No8#WQBl+k^~$l;SH)L|i|fa_$t{GwyUe5|*S&Yp~}nCh-vyY=AV#ws!g6BL*{ z;~U+uxX^M!G2?shu6^O&1Hvx}ehC z_3`zku+VBVC78QA?&*u*`-Al$cDp-A5ki{x_4g+&lvpuy#R*k(;-|h*ht@GX{SS0Q zG=IO<;MF3W%**g6Z>(8-5}kd#(QgxOqffRzRz`s5ec38&x`D-)7wJLc#y&BW7Ao=L zQ_41O+jP3!w2U__2&+gQ4$}!%ifFIR8}M~PW&)6Sqv?}^T%plarJb;n{O9ls;$*>cln$s7TV0r6iZBXMVxI|p zCLq2&S|;Bqy6n@M(i!+r#CN!v*d!VyCtpy2s7DhrC*@ z93)_znoTA@51Y_!Pb#Hwj!T9eJE*_8Z^!aM7P-QGXGEQWK-x@q+J;QCzI!1^+jks* zqM2MTQ6{=Dy@VQV 
zO#74_nyIGtexnVX1n^+EAmEE80%_}oa{*w9jTaXYZ-;0aLA0gVXaV;!e&l+`L)ON*9dP#a{09=%-)nUE?a>O&J<2Y z?|^TuBW<~!F=y~*-W#+C#0UUrVj-h@hV~gzXy3aU>yOx%l9c}79FVu4GFyB)RNn93 z-u~}W5n=mfHQLd(XDvpdEX&}+VGEgaENTcIAEdM2%5h;8kQgMIgoSDa!zRmV;zW|_ z9Wq&D>`2QSt3jx$%LGgwILh`IxMQQH224dA)>Lz5qfHdEW^~SH|NOIN^m|mSwZ_O; zchFxgQXS z5|e?gDsg!$N8B<9^6Ve?oW!a07vGG#j6^!+ls#s0C~>SBiMTW!p8eZPuTB@IeJN|!AgHS*^wY2sdCm z9VvTjZ|S(ci=4b_ogO*SZ=oNiUABJxtzy^)_8D7bRPbtM*yCzWC47sVA4U{0p$8 zps~=(67-Y=gOf3U&TPb3PAPd$6@TUV1hzo;?Z8-Bh=X5>R z9k^1pA^Ng6E6v+f`9NVEkI3L5=ZdC{?N4o_#_z4JD>>{i6agj4dJP+V`K=J}9LglpjlsZ34`vnRX*HYP$+X*7F zNpe-S!38k>SAv9<~X6$zJ9^!_`S;0FUkb)$f5|Lv#pJcL=B7ET#{H3 z-jz!;51_}+HJ^%u$9F}908<1N2O}2m&;L4;0ubIkQynpW9?itp@i$ZbVvyUgizpU@mo+wV72Qx83}+u!7|e;BsyBl$jC_aV&w?>8sjGJAy%=? zj>dFgg3}+&zjSK>tJMT{lgMGXe~tC>twlvh$;4+`?Zh5n-q*C9yZ`)^oiBpK2W3Kb f7Uuqc(E?sf)-r}!e3iewSYTsiZ%O&>_r(7J$EXY0 diff --git a/doc/source/6-storagemodel.png b/doc/source/6-storagemodel.png deleted file mode 100644 index f72ece621960039b737c1ed16142b2384c889e17..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52865 zcmeFYbyU<}v^a_ZDvF4TC`gDj64EscN)FvfOAIiygfxgC2$Dm0heM1sk_rOSC0)|p zUBf%W_j~XC{nmSLt#|);f4sNuXF2$pIp^$i&hE4KJ^_mIk_7mq_&7K?1TZNvWgMLA zG&nd{#ctw(U%HC-5#T?6SJgKT$_7qUwstlq2unDXgNrSk3hs)gwmp@$7v|t;JU42g-L={;`_-@nFF#)nniU)Dh($LyKD6tO(_)CufD&a zHW!N#!NI}5i2zG_BPT0hWMjo@U~FRuXLYu+1#88@5rR6~8W>r?9jFZ9rU+{Y^=5TF zH5I}bLao6q$1Z0p3O7SYx!S>1T;)}bTrG_Fjj5r+_(IMC0Du+T!GOxy%F^0iz!^e) z30DBTV}E9&rn>CnU;&|iBd16uYGVhd;$r1uWoHq0M!e^s7RIL%vNJXjP!^N;7XQGb>d{Tu`^|R$#tvrx z%een)k%A*k&A?v%w=oKK2spq3cE4Z{H5(R}Z2uw?(1z_liG#(^|NGDXO!D8t@&BOf zf6(>c!oYv4^Z#(y|Dfx?g@ON8=l|iZ|8JoS|G(TjxHa(UoPhIIL7hGa9KY-KuVJb; z!Q+0@_%jX;6%I`7m8$dT+Juw4s$<>7HZLP*1gzWF*FpMbnh!qyXIw=kstqliq2g~< z2eo;tRfmNeC=IxyS7GHmlZECL-KZH97g^IsynDWie`u&^KZWVh$L&o~M%;OFzfa1~ zw1|NK<>)cg8BZTwenUB3La zyMF)j<;VO@;Bb-xP zKEabx?}1^FCr98X99LRD%gdQ@VxEWn^TH}gb>;F+C*;4iav}f@`~3e0kWklv-~9YM zBNVE?yu2K8VZ#K0=<+1UA-Nou6mt$=Jo@s{$A|dI6UHx1=}Svy8xJq{U)94;B=x31 zJu5q#iJM!Ek&&^jqhqZHbAyn=a^Dn&kB_gcqm$@IK$cxu$%lL6hW+8?upU?~PuTDF z_Wp&=gfU`~$3b+spM3c}dE#px3Q;v|+ar|Hqyu-zp)tfHsQk>|%a3t!aYySss>^Mr zSeK1OURsay1ye$k1ut3a!`&q{Z!tZ+WF!(985`?7de2bg50&_xpeVFgs{6UmLw+-} z94H$vvuIQ~KDolFk_$aYRMh)7YEcIU6mr)BQ&Xve$#~@>=;REYZ(J^Ky@&Z~z}%d1 zfqs2mQ`*DeD`L)jVVwAsT7A$L%E-i!goLQHnl)K+==(R4DulVHzU+@5uUd^1i))u# zDk&<4Ep)_Xv_{a|J6-aYEh$7GOiET()zHwegsS2G{cHmxdb8oB+r}Jj)q9C2{^BUF zH<^B+)rv?5K9_prp=XBCjrthlP5vVHW4i+F3i$|ng~3up|J?d|XA<_PiX-v>!VxxH zVj8Mmpp^yYZn412fMw9jtDVK(+PPi27i&-c6sqMjE7`7YJrMrq+UWj#4iht@@~cdm+sP+H1WpjFoPRnj+9|3kBmbF1buK`#+qNd z@zE^W*&Q9Sume4!nC>_<7~4JaI-z4S-g#NJDKsuMRD(^;Q$G4Gg-~uzva2)z(sQ&u z`ite>mx2vil`oa2C z_4EqA+bgY=Z=x%RaSKaML6ij`CV4(95LMsgj+9%7mc$MTxVgd8ED5tIt78re$`Dd~}%a>RNsEwq- z^5ikDR&Mqe9bwZtno|^+tnJA8!P-bh^+uHpvqoX&W9*EiO>(p3>5SyH45kBp>A=%j;@a)txAKmQ)}ay*s`S@qeF z{AZn}g`(bKv)^zi>v8r8zht^X#AfHB$0}@0?PN%vUGn$`&i)M%YBsj+mvQs4x7pI{ zAW~<;w?d$tF}|z!{+Vd8Ofbw$;OSe%aR#O#pFP4ak+d|j$Vepe^(&pvikDm~kTU`1 z@hJoip5Bj`h8lgv3BzgD$0|Y%Dll}G~3(T zMb9gHoA?$(q=p21S9@9SGQaj&%a|Ub5D#<>0STzIv(s_7*~4cZu*_&tUj>=UxnOYj!6vn!tMjt;YB8ua1T4dI4Bl0NR?_+Gl2J;glN z0?y9PA_!})?1e2Yzj$VVyh-EwFTa@;8hMLF*!Mqy2&r4`1k6Hzg8FmFOG=&rc2v5|yHWaIVf z(Dk+!{fFz`Q`F9jK_CQyWG`izV`}r`Zt<2@cDBLM4Aw^sS>(q`Wo7glaSSZ1eaI%S zzQvS64ePF3xOlj$eNQx2C;cf^-iCQRZc9!Y+xxPLc39&S9@Wq@v7x0)F&o0n4|2#o zp~zl(Riw4utD&{```U?ldk!l1n|aGmp$#ss+q7@)mmaPxE@XT6XEVC4sA|smw=Pkp zdY)$ne!MzQOTYR%hb*edN;}45vLMBa4(7UB+&;E1;OZY;NI3*t!aZ7RuzCw~6!&a= z3Ik&3eU^I#dgL??A-O+bnz9r)GSq4!_(WpZlDNufW59KDJl_p-d-0`ed%DyY#-6JjpB%3Sjz9;xH(f;h3F#~x$?WAzoxBJY 
z6ky8ll+y5bcT49A%HrL|*tj-GEe-0~qyWcQp06?k4yOFy8~PY)G-Q-Dl=-LlG~{b?}YT?PDx`u3S2$AibK zR2|6$$i!Fcxg2Cc1Jk!W$sOpn+{B!p>_^b3X{aqP2yQ?#Twe54hz6kRG4Tz1iX<1) zN>4mI91Rbx3$!a0TQ_UbeI2p`>S|w`O;k3#uZ^C}shpViQV$Fbw%w;hPutsM+O8W1 zkVz2bLUC}GM1@?(Zgqd3Hu2cqC($Zdk(+Q`^nxLUPu`XVe!+KGAJ>kz@|tT=g(({c z?J_-k>0?9-(~Mg#@YFLs2qou96uiBzuCAO~Jae_B+oU8eeNTjlM2|tny~gs>r<0k0 z3pGdP2N;Tp8u6do+xU;U-ej@lJNJA2x=D~CsPQaAvVCbY2i2!M?|E43>3paQr>Er} ztIOEM_f7Jun?cvh|9md-`|oUqd7j!aBcrr#wfeF?H(`xXZ7sJ~qbqSv7Gm8BI`o&h zu;+JzHB!O3GJj;`#Ip&X{Ny)9U2|7_77_0xRmzsF+QWxSJ$?kE7Yp^>xxvr9-bZv* z?{=B2PXOcyRKaC(%uTkodu#SrRo(Y;2PZuJSt~b%32M$bTw-Ni7gsDs^QS*-&k}nz z?ypQKidx8rc%IEY@TfP;GS5SGRW6VZ^d2~660Cpqk{ol&{)g@d`ocy=j!QkyqIj0W zZF%JC7qlR!E&in^onFh)`WR_I(tZ@LUU+CqJ!=Yo&d34rAP21PPdE_^1sjyNdZ2r? z1B1EpYFyJSre@8-qjHB5Ragu|T34_0U=Laidm_Ctdeks1PSj{=t*q-;n_{%wJ$`q@ z#qVv|44`3bOe$|v(^-?#a~UJ{JEtzvA`hoF_gq2}q56vx9x?qE5?)@P6cTf{0vJ_jUflR;*7u{GZ75QblK*&9Q?``u{BTn>IVR`FPanrgh?C&t?h|&A zS?hY;DB3J(-IDCF(c!q+0j|>439~=HU3MROO(=|=yENKVz9JcSLjUR6l|#Ber{>xq zsaO0H$#^oSaWQ`;G8GW-BxX7~)yw0s4{@aX2Wy>?59bd*KRDRoSPj&K_TFHg`ZJVb z6NyHrAPLt72zW1EXFsBI;5t1*l~zgt6*LDw*w|W3TE-Mcr`V5gFCHB^Z*1v3l8}w# z_ymOGTrwn*2EH-eyKHnM;k()}CBy&|*c#Lp*qfGQ>h6q+X-Wrb6|vTUTlKf@G_uSh zM!K(G(EaSkGf^%~jFZ&WL{bjf!Be&8A$4x2NsrtB2;zAJZO!lm1xl%QbXB53pL-9bvA z{#mSRw=);V%_!rym#Yp-F}1|VOy5U-7E*nG(+>Q15+Ae(?rWM>)#s`4Xc}DiC0Y{D zdvyA%juwGP5|$V*gN9fCOwAmMG4IYp^Dm?@w72znS4{TR&9~1lOc$n=m^y77-Ln7F zJ)!F_e5UB8=ORvvSVk4(t1fTPVlgd!{5JRxI>k@N-to&R8PHSUbl_+^Auuq^1n=zX zt&oY9LcW#=RbRPb;b7cvGN3L{zeq{mZ)LT%x4p7T3(Nk1ljXG^b=PV*l{7()o4DYD zjDKzP`o6u>kaxwD`bs=EiiuGvxImfz^kBK2K1DgWGdrv|qdI?Kq8AfVo~FI=RUV6n zds2JOi*kwixv@V-N~Bsy8VZV8E?AwNCa zHQ6Ml5Xf{g(JN4(bWWAAm>gK&G$X4zOCgQ!SwTV>myK4k`)36uX*be4M=xhxQQQj0nIo7Betu0}JAh}-sC$Wv*x zzAh_LVOJgh!y7lBcgYP5ay&Udhi`4G!{5PjLZ%^aKhP|n4CHA|bkPgdsI(m3B8Z|P zrnLA@BKb~q^X_I?SU*LE5Pi$?V%5q@#O8eRLM`3IR&^D2KQlo=2Se-~7#Da;D8Rh# z1QzqvV?*I5*Kj`6Wpz^AS5i8FbKSNpcKs6ojE$+Cy?sP=b+w7EQ$M1DGhWu!ao#_^ zev6gGbA?D(h(pJzGlfOQ_Vc6qgAfy4-YLYXf69>;{+FP^31~t_ijF{C<7tXjoFpNv zSV026;idz*Mftgbkge3>g|&;TOPPhBD7}W(HhUb+a_PhjnZC_v6^JITBr4Q|Op_-6_MfeH(cD4NaRG&+gmXX4v=n$G9k)gJ8j35wFwLMxD zV4OGP$up|bU*l0y79>I>)!=Yl&yWFPo+s`#^V9RdjFcTW7L2sNqnOCb8ylYBwphM0 z)<32#0fmkP26nl23ie}Bnlq&W&fen#CWNsqz&cMw zqoSzV;ytR^Z}do9^6+!KQ}#Kzm8e2}4)DJy@6yDxlDi14O9FR(-X!U1jtteS$KI@% z)3S>X@@tbUgb}b9$bMgE9z^D;&kh4oX&mimC}T}x(U49?RlHVKq3)O!w?G8X;P9H4h9PUUsx<`#2QJ=ww@!IS+jD_| zp#jE&_&n~Fg#=(KV*Fckdy0&>_luxy6K1L#-kUn=yIrOA8CGUx7|7pba42154Lz_l zE_$Q-=8Y6#07c>Qe!zyY#)RmUN5-0Lp}mY>NG!wDhXY)QFVP~M#rqK9x(kh}xque< z6CT;c3CEbO>ea$W6>=9V%pUuD)HwlE?ed9=GbLVwO7f9J!c7u?~u_clxTvp0{Q98ZF1+; z0+R^A_fG$cZQ048uN#P)B_^B<3=IuclqrE2=OY#^8d%keA1>)rXnfl~d%0a+G^?j9 zlFMT71uYGYgV_ario0?gK}1d(JdH?{>oi;cu#$b|`(ko`ny9}qE2Gl*Pf;BPkY-W# zZ$^!GD-;zKxgJgXGv35W9Wv7(3;-#CI9WQ7pMg_A#Hf0&kt+zU9og&X00ZE)R@A~^ zNJe;Om1cSw2`o*~cH^~l{|#~75(I$=?z=bLzViIYACr^L=2NZh% z<@${q79FuKk(rSm)ww!^#hVEQ$hAi9n>0H9?El1Gx(l9@@sgK^znh~PSbY|fNnxu) zX=4*@?1qK-{NAh}$2 zjPd(A^RqsI92a;%(Cw*|n4}~#505(1gG1aEmI$dX_XOd-ICy{R&54ZsOrU|WJ*z%z zo4$|dG!3EoTGc1P7EqE2;%>|RIh=7>w}~!i{SmjSN<~CTsQ?;-CW#7-coN9H=z_3= zKNL<6j*urDAcv3Vb5#32Es;-afkcD4rR58QOENh;3+>u?Oi77;Y1C{sRp%k_ZI!tx zG$|Eh5UyI9Fw$0VVtFnN+Y7Oc0wQH`cC-z0Z`ehJjm51A*-Q!net`8wJ2K~H-(DO0 zt_E-Vgv^{NEZavH{iLG5ffBwX-UmdMi@G@$SFj{#eMadvivvqp*UjQGkm+rnBy|^ zh24lq3Nku6yVmVcz>}4YO~ci-s-FjBx=4Ag6Yt-@FYlJv^o68ibyv^xc)q``;{Cdo z)$q4hbP+HmB_;XIxK5K63sL{OYVPhrBxqa;-FIQQw;!4Hri-HrOP4bSWVkoGrj$om z%lBsw)xsiZTZO~nAjKe#<s3}v9T0VSw)2w2GVNr5V!L5j128@dO>EH)giRa z?(nto@kmvj@fkPx$jG{Ii!QpBjxeLB=}`WuARVlD#8SX{>ut#)nNNf^x?XSK<;$JD 
z9rJQIAkrZ9LkkNG3`CAtMmHITgMTqGLx@!os$MY%hfXH&5SX*$>-Tda!opG`BOj6r zI7odxbz&4U`34eBVfBut47|*Zh}>pTLWr0Wmi&7RE-o%iY;4eV-V^bb5X42r)XLxK z#sUiZAN?wyro7OeIm=P(KiXxWUjq1NcgcHR+k6TM+&NHJr6+k}c1I>ZDr&`seb_=( zw_xSu#AIyLl4qU*f(o*!3s*49tr2}4zh6FFiipT?61RyzY{+0He7{+Hxbih8S=qoK zO<2EP`ez!^3tU$`%XxJx&5G9pQ3s{#$!;44YL}r{5 zr={sw)@E+5s;iOHfMQY`7-+4od;_%}of~eRcpC;5D|_V%$MLDf{>p%S%jeIZ6;)Lu zXT2cYnM*~>d_DhB3(zOhFgB)rIO&GI)csn!o3vF2(QY?s1rVL}F?-%&))N9*IXTQAM^^sGkDlwqb%f*C*|q#guO=mBhRl!e@1DC5gq&MQmoR7)#YCKw zlg9_qg%1n>N_TXqnwVsR=>CQ?3pMrYP{_fXQ1d)(4N7(a0j-LO_X$|#l+Asimku=f z2I}r;%>udP<9EH{B-k9}W%A;O=JtA5(rC4lO$mE?{m)cBN=n_w4>J+Zp5+ic@9_Xr z@0Y|rruFr=Ft`0CaeU0!&@Qu}vM{)$p_Y~I7r^I+AG*P}6v?PU1GA~w_A{Cfwy?qG zqZ$cDjdtUmBCDo^mtjD!$+%-x&yqRRlM7X`pEyDc7*N=&cPU0s)Bfol~ z&ez;rhG25RLAVO8v%`>gj%-}|*_nGw8(DB8?@iiKN(nJBv8743&8*7|Ue!3rM_^?x zGm{3`OqP>Qo5+}W6j;X7JT0!OABJgfYZYaleWGcx4eijap0j5E{rFKfi@xWwv)9^i z5mt50%*;G|`gGiCaj`ckSV2lk(bKaION5)W*`0CSiT7E)MQQKvHkwp2V~&+R_`$ph1{XaZlFb&C0#U#W=n==$uoSsQa{`b1)|sXO7*HO`!&6S6y` zYqJ?rC7R7DbWpJ=Vq-3*-)4Vsk@!r^QOa7VpdCB4p`pCg)E`B@78={OYK=hc1K)P9 znaoBCiIwc*5zVS2RLb7S7Z0Ey%k#CY{%u%f z2C|(%nUB`n=W&FRuh{QOCkp8M(o8td2ESOxroU3#=dv@GT7t>cG?ml8{``smGH!{$ z`z~iywZ&&N+VAK@+R?FXvR;4W^pJUB<_qylgO5VBH3A-%yc)c8!oqr9C;cf8$;g;t zAuhBB8%OC1Ivv=I{fRysU&GCa)=WuCkH~*9m&?GU+?Qapa(Z#rQlupjS;1>?z3NX9 zx~?yTL|@ffI*{hn_bd7QJ0AIQpg{OrQIZ#8w$M@zt2%u_K6t=gVTx0oKfGWPBe+_R5D)`=^aZ1m=`!gwAt_`ZX?ECm}9gEFACzIs*W{WW?0t*&>f zjXc;KN6mQ%a)abL*U85(ZdUB!HHT#D6Wduv{0boXXA<=2#TyayrMIVRI8qG95-I@+ z^5~!O5`V^ergED2`mMB&>kzAHV3lrQP>`y-yL-Z^%U&cN4GoRi&Rnb3ce|{E^|724 z>4f(l$6o;jiidC?w7S*zcCkSnU(7RNkB;%YHp$JQl-(29tahqQJ_x49#l;~rcduT% z_1sqd$k}xTi8C^nvjcSF~)>f?w>!iEn0&@># z?*x)%85prDf;>KC<7(u?1!njKEA8gQ$cxXhz^MXGYlANSo^2GK59$sc{*}Ggdh_!; zxp^7)a844f#>!zxAUT1DyBSn`7C^;1>b*^U|L|dMYO09a!5U)swo7R8a2N=2Hnp_y z-lkJN+*Ah6?ansgpV$BuZU*Z=nn^hQfCGAcrWjo|<>6W6?_EQwt}Y$7)z+xqzn4nw z1s!=TfxBm(92G_C@>dZ5N~!`(Mn(nHpqxJmWWNgeaZg`=o%MyPqem^sLb{q#_2#~e z#TErsIj#cT2vW1z#WSyIt#$H#1zXI|Qs%yTm+_E-Q*64|r`qF%G`#4y6wmcF#Lx08DvIM`=UEfHN6 zf<$SnW$o?l@l@f*eSTJS6m1Z|jDSXB}4ym#cEYUsT;Dbc$ZOifMwkc1@X38x7IA78BCy`?m} zlf$8aE%pHyzKed-yLZ&y{;`E2^_N8OardZQ{39YyjqZN}EaA_9F0eW*r>OmEFgZCH z&jAkvH0(3vdj`Yh*8~Sv&5ICGYeYwCP z9-1Oy06Z1`MP%&buh*tzuj#J0^eF?oB86=Jbk}UCCXiy$?iK0nAJ-#9ruw;pY!A82 ze@CXY-RJz+0M0u0ec^0%e?C|gs;j|nHGOEOB_XkJQVOSORn;pDKR>OofMy%w+IBJS z96I4Ym(VE-!|KY!epGwG38wZC{`Od*QW0)dP}kA=4*$8KiZXm>&vFO@DjK^AydDzZ zWf260J8%ST75S=wx(VB^JP;-BE1utZYbR6rJBFV>#DFx|z*kaNxB8Gi3>3V;z689J zefxxS+8ld)>{(oMTeXTZPEsuRnacg-$>`M9I(JXh_#HgCJGUcfa2MvC_=3W^w)ts5 zfL$DeDLy%tE>?KkDoqM_2HiGXE~hCO`kR!Z@Vu!>Ku10Mr?7C~?tWD7MAFsipvTv) zIymn>!iFcN@qB{R)RF}5dSCxwFJYpZNpq?g{B4><2^0wQ11Cpc zCw~rkU)SZd`7Dv+M|Aw^)o4;w)(X0Q5IA325MeCf z*MM#)GE4?3|Aw7Qde;MNgT6MW#kCNOL(eGo#*~jjmX)i2Qd4XDn(r{$ zJk%~jp-CUPJK+Q}FYjEt85lACb0zPwqUT!Md?6C=evJ_ngNSHAUJMQ}zF75bAoXP3 z-i{s^(je9|QH!m8%A9!oXAqTLrL~Ys_5HisZ*|orheVs(_{O&O5x}1V+nigUenzRF zfbZ^iK>TRs2jdk{nGmmPZEk*k$S%b!$5lR!$2Hd&0F#OEA`%~#CX{v7OV}BISeUQ+_UtXVlb@{p`FAUqOMOxfa%~qPxieP6itd z4Lsd0VHFjAT(a?#$OLw%0HBJzfq$#Qj5wg_ld~P^N>>3HTLb$J8)q}DJ&EB{^>2y20_W} zrbB!SizpTC{smLQt?lp@#x%czOtE>FV{-G&jUj#Ld4ljHzST`g4Vho*<2)4EN`JK` zKE8veb0wX8T;D?Zzmrk0a6B(*8fphl&a9{1P5opv^h@rvqs4U;UY zCCC4)y8rLht`lCCvEkGzfRiIA!OMPNV1VDz9aDq~Oi$H#cDbJqdZ5Hf0fe(+8XApR zS)PT7gx+)73hSz$y(?F9TF6rY{|!VGJpZvgod2uK0RQJ@wG^lP;7{+G(9IGSet1~9E#uI!h+-D;wzV%{oijC{KyL2DK4{Z!Lau9 zNapx;6?Ns@Kbh+So5(!Izl z0a@zuL?|qr7Pua7InOgVoU?)ZuN5DN_gJSw@VesDxfB|80t%p56yf zPyY-JttK=3{ca!A=q?=2uDCdm^f*1n)5H^7@rtqPtFuZddpD3LaMnnnrZ)QUK2gRO zbaD>!!UQ8Bp+e5?ScO%@;0xXSJ)f!e_7)8i-FHdWHHU;J{S)>MbN)1tYP*Ej&u@%) 
z#W6GY`d?t6>nHBDJ0+ZOt~hRoj_aLu>sJV#xacJ~uKac(mnj-zVBmUv;XXsLK2;~~ zSXLG=iyiVde5AK7oe{zDc1npJ5nNq%CgR_JWuBWueuxS_?rekIH@87q9 zS1ZH5r@toh^js%>Elvmo=u=E6Ix}L5t70J_i>&O(hX#fW^$Lsj^mvUWT;tScuMspQ zo08ByuR7%@NAHYA{NglG2WY}24eCHFClertI7+#wCy+DWWDsgL*0WW#3XYlc3v3@yo~oAb$UbN?)AxTPC0b)_3GM_+S!inn3ds!2`>yxCgnUh z-9O|H=caqsBBn$oP06RswYC4`Z`q<&fs4k#c8|C2$)g&Bh1}tiCFRq;Y-LGFVB^1h{kG4i8Bvenj z&1+_DO!TfeGIl`H(rYY_k8UvXy+DZ$F>%?2jgL=6ISI*?adXSJOq@+BkF>Qr%#RDg z5B4{E&ih9luY4%HC%k6(q~i)Q)8_`EIk?O55O?V(FjT3;8spCXG|M&@=k~RSZ6$4H zv6)*AAv9x@vSjo``KbIJ8Ly+PmDSb#gvF`BFr~$IGY)3gC5DMTeFao`wiaa#hNK1a z#NRWFChOv;Hd_ax@gSS_cfT33b**4R-;<^!wBFidN%ND$U1Ur}Xrb;=7!BiggcEs7 zE`VK60;Qh%@pb~UGzG3BNn>7}@9$1Qa>x@VH9S0O4QJMNdH)RlS;~Trrvb^Is954o z;&sffsqx@8J96%fO(L9EN7u%tiL@9UtMY z_*GLzwXS7|-l((Me;Sg^k!CEp`DssdO7P<+vJF?USI?&L13vdwGHUSnl2vQhva-mT zjZBuikp`_$Qe=#dlFlbdIaO8g6nM(%342!AZLkTtUzEPr#YrszMv)ujgT+JZl!>-u zR?ajD8Qq7Q><+mY4;oxh`Gq-BB7{w01UP0*`PvsU$WlyjJz`>~-W+_GUYO*v)|n)m zj5}BNZ(W56#IFgNK(}X|qEeF5mWS9>MQn5X%Y8`8`OhXc9#I;$DAcwgPEQ}z4z8V@ zU0t70w3>(0Qwr1{NzvkdkAK&=&Jo`cGSn&Ti68tlztrc(!~U(XD*JXVpnK6ryV(rZ zYvS@P6dWt=r;P`Mu!6zd+C{iciicTaCNnxFR6lig|3?@*xY2VZI7n|2^(}e@D08lp zbObUp?6bG)_H3lO#iW|!IA+&{!>XJ`-MV-jTW7*2lA{k2Su% zXvziUd9BwHJTAVV5pMTWtEvQADu&J*jh>=6r?%s07_G;}JE8sk{mak37fHUBk%@6V z9i{xZy0R*uTGF2sbN*}$BO+kfKr4?PQXR4Qi(nVfOK|C*KxWpTZ}8Oa%z+fIxeIP^ z^tAv^^lWm$fd#L_+9xZDj57PBwOZTE>DcXAI)1B!7`XnZwa8u=&((n15dVuVoxE?c zX~@jApy$Z7T7krLRFO`d{SE~)ReN%pE9@j=W=-)>P}C;Y2a^gMyQ z{6f)aCok4#z^+PdGl zN?E%v>28P8m(EZslnx*Ky%Jt9m<{;Ii^}SF9N)f#1_op)WT-IStK!8q>azFChCtox zVn0nOyqbqX$APoj=h5bXk1U0(-^f~4UkixtN-G*nswKfo;rMDcdp$n5(OQ1-&20#c zfL%eujpmVS0eg+eO!5S%qMmwH1}I3~NEs}=jpI;}h!pTBZOJy6rXaQaIv7|n)dMml zW8B*mIKXiN(aKZ89`aC(93f4RQd@Gw)aW8SD-W<6Ci-5BkE7;0ZyJ7x2(PrDvq)cS z;d*eyJ-qAwOy4eWa;%^ie+gK;Hk?QwL)8z3&*Onzi0m~U^wRT`Se-gbNH{F@Y1UhT`>c|~}2R~~7SUiA(souK$N>?Sp5Oc6oju{Zc7a`-Rmy9NU{fudrs z^H6g0ML5mGwtv0F($cMv@6Ya%4}(NIsq2pRVU?B}iHnoM;ykmHN8P078QtRI^oT9y zTtFiyBvHd?^Ck?`8e+N?W>@Y%r`H>uS^O1Tt9Y@$W(VtYsxB|r+ATz7w2oEia+@tb zC3Tbi<*=h?d*=#2yt1ix&aa$c)p_$XcKcGpZhmtqQbU+^?a1v}!m1fR!Ni4k}a=zCO)KNCO2+8lyK|PZGqZMrg;3!!7qp?2~`BBGe>PYX)=H}!{ zI1S>DZv$-3t^Hlt(+yVQKmP1_w+-K*Qy1ayF`m^! zUb)^;I9sU}K*ms!-z^r}Q>NR&s63|q&IRtB1A?C8m@Xtuf$K8k`QiANrx1FYuXP5j zS1*2G3I=QPRvz>J!Yn4zISN&7l0m}QCE8O?b*sA!6eC1ZIS(6tsEqzK(;koId5v6P zwsgo`M|#961FHfAb#GfxW^R5xkC|*CWU1p{B$4sG&u-Q~Cp+G!d~taL41_OIWAt2M z>Um{DrHz(6*z%eM&MjUaqWk82u{W&!?^`+m0u#>S(>j>a#xTUy? 
zTW@R-5LyOLp?ELwT;4SP@=K|$*HMci7TD@s6gWGl_+HIR>Ed*(;K>R*>2B{v0kLig z&0bk2!B9r_{G=4&L^=pWN_5KlC?-$;U> zn{9s6%ggix)00a~3?}3Ewi%@_pu1!pf0_(*aeRN_+hU?;HXdWL_ z65fQXRXc}(nI9;+n|D^DLQ1T^>my68M{XBKKU{vZ;#rI7@By!gvnGx5-KX++SCgtp zW?B-J8~ZCd`NPbFTTLWKCmDbfz?7eL;yI+=S=syDNlbDxC1YL43CEULUna9z*RdWi z#j9W(vYy9?%`S%u?R*B;`TsDF-XkY3@Tc^A^X>*=tB%r7adC7%x~?wylgpJob0<(u zNW*B@0S}~r7PNHP^2I1K-py(mVb!aSGU&KILhBdeVJQI^E`__z9j73Er7B;@P7w)MBnNG&p^ zpuXm?;6+;3F7)Bt)1hK0G~eg6ynSsJ-R@N-Qetz;=25jn>``EG{92*lCX!+u6G%R9R)mQ3lj7y^1=&7VK z==8GVcqe9;Xl(_6di5_9)W;pUNU0SJ4MkO5KrzxBak3`G^!Hvrx?b#k?_(;iVfbrQ z`bQRZk#|4Mamz;2XDZBBy9vzm-m+wg)a&1NuNNoERC|OAC%SzF*EpQ^S$>=^t+4_C zQ4f8ceQN}E^Og&gl91BRtsAm9**zXN=e{s*-*?m)Xfs@4b&V{VjfPFohr+<)`|eNE z#|N`10xJa!Y^I$Tm;iDz^>9bbosCC?S4B^4+mCL1CKD{`jjd37dgXxL(s=$OFTV}M zySgc2^4#zVbYHVYhmZy0KR9{2o`O=;+QhYXNL|IFrDAXN3g+dZI8T9A5a?S0om8WL z){8vNDH_V+Rz9$%TmPYf^8$niC_hu{DjFJ6Z`NsZQ(OFvrQae5G1EByA=|@yh-j$rE@#NvNOP^iS+pQ)$m3q{|mkLbH!4Rlqbqf&6NNY$vUAVs}E`sG=<+d~>ebYt~JXEtPM2^= zy~HmG)o~NrgT)5N)D1T#e=nuTtA%AM!QokNA_%uJ`T7Rid(&SvH;vdU^&SkFl#GBv zBdg-X>T0))_DEV&sR7 z!cbdB!L6mdXce(Jthelz^}Wa{^zv`(K37g%wB6y}4Jruwqodo}-tCw7``dH%?^9zj zDJ4oLgrTOG^rXqxZ?&O(40F-spZM5Vxto^reHN2r%U#7N)t4KydGSn>r0YJpVB(Dw zuP+-vhjbqdm4QN%N31%-`NeVsFOvkFtV>o9YeWF0*9Qx86tkx8?8M>3Rx78FC&bBO`65`~+0b#;w5@{qL%Kl(q(M?aQt8fbY|i$W>f7UHcA&Dvnhx%% zkMDV+?HQ)tG*hE__(H|ia>p;jO8ik<3Qi@bqr_jD$&SLDdIPttjhIEV%lWIHTH|v$ zd{NRLN$tjx*ub;6JQ{x6L_%I#8udwz@@xC2fdT9c43HFl0p+mNck8ciPE%r!li(C;radLdG+{vP7 zw|;PHys{Jd_shOL@{si~g`0)ZEvFAk`2`S)_wO5w=4*z~N+duZcV2Vn)51)su2aj` zXpRrS^m+26c4{y+z15FU#F1;dXd_%fm!4am$*8@3e2^!?^N>J6>V$f&FPr%98?>uL zHj}S+>-2a(tWWc~IzM`>+xo#XrF^MUg@L;jZL{_;|4*-FgC)jfN+>AHG~ogaC-Uyc^D zb#@AL%8n|=*!y~(NPnd86W=EnerVxX+QGHrjW1ayDZ z%ZAbPOl<7i9_n1=KJ)MHOmzFe)y&E4TiPOht>BoAt?@W0KZ=c`f@IEYtEotZ?)dc8 z^)t2yb6=jz^XqNR9xX9R6-7ly^yDD-J@TW~H9eF%KIy4+5&6W%<`zmsQqngntGc`Q z?ggRRqsvV@up!n`Q@h{bOVXdTVSNn>N-ur?xHfs`Dt_tTWT(U90x1Lm2*zQR{A@MP z`JHcs$FD0rY!6v~s(AhS)8KJ^Y4NeDnzD^0x5>e}(aDt4(W`AkTjkV_9CaSg-LIUk zC_f_PVNWAJ+S%U4kk~r0J#ND2V}9M*)5!MbZaVfi62t8+^f_sL{fyt&F4lCCrb^$n z{Q09biFOq?ZLC-LiCJ06`j|kMlwJ0K>-OWJ!#`sJafd&z?$SEoOdlUCwft;#W%%L! 
zpQn`j{ziPI^VCHh=4i+E`8vrTd|jY0b@NGK?Qhc4<6Q>p zCIsqLT;1}~^(V?-ad<22-^efP1@d}iw={NWG;0UXz1ligqOs5QF+#pmn z<9xJ9rRU_-Y025tYWBJ`fAQZfSkr>qqE9ZqugN8O7hz~*w6$KlQR=$4+EXYXC>RZ; zG2da!m(>^F@2t8l9Q#DatGCZ(=kT!EU&Chh%#f9Bo`mSzW{#I;p2bXiAXjY#FU0~K zA!ui<1j}WikG{T_D_Bu%a~D7T;-*w*@2|D-O3UdlJ%#=HCE!kms=vXLdB@tH&mkZZ z&!cmG6EPM_t!X}6<8gw5*|^tvGqx;%PlX|YUfANt;o>tJH zAwRNSA3Ynr<(Itm5V6YK`o^r`#A+lihH#bEWE3~+8^(?5x89#V8CR^)P~O`;%r%lH z9?KuV8IVVspG!@2)mTO6>-}H^xsU~UnMuEK@rMYDNCv435gFPukyg)%-5@2xsu}Fz z7Z2xKBAJqY9N+50Kr>kCYnBqP8*mkO+g}KCnr+wRx7jW0*Qyis7km(<74*;c)vK?q z=VVbgpPH#fH%E@krm|%43+3bbw!q5oyY<1)}gYN4~WV*YXO;raDNG*vX zbwYs=6&RAqe16thwZ~Y_z`}gFBV`K-$_V`wY6eD6FfBUNccc`DhcIct(lqZ#h zzWydIcFXTyNyPEp-APB5JWA%H)(m{AZyHfAWeCb5fRG7uc_Ma0Ir;`UqiuVVWbvC} zrsp->$MPxee>}w>s5^db2noS;{4?HMlSc0@AYjAEvBF=%{q37!|B06g{IPO)@p;zs z{%X}Ms_wPTrF4fiYg-$={*w(m_xWu3@R2V6(Sghluv2Y&aWFB{RGhCj+$F^J3n;Jo zQ`Y_G<%v)ZzC~JHdBFVa1y#;=3+IfbygY{aSmAH&e)^!$LZrHtV2T=k@hn^i*X0kN zHWV;tydOkEd;ai#?Jju1v~}p>p@|h=;D8w^W+0k41}Q z^3J={pItnq|7SCs(quGaJuy-%6E!-&>k&|+r_t(_nKC1{IFT(DAY;`~-|Vpx*eT*M z7tFdAKcTTigrF>$$0hXBdG?#N?GWm}&MSMCL|Au`@B_(H)aeJArXR#|w3Qe0oaqIRv+uGZ2 zX{j;&t4X-O_CNUDmZDiH3?=z0)T)0Xi)7xzfrrsZ=({joh+=(z>Kyr|#SRDZi)A(U`5qn~pXb zH;VZR8Zd5dOh_h?pk6BJm9MadcBW12%vqS$>vohGnV7_OU9Mia!V4sXK7A>z1(}&L zDH&10IIMJZU-s5Io4TXoiVrtJbgB{Iv~OOIb~GswuRZir?93rVN@>tEX59Arl}K(V zHXx>@Rd%&R$H;IC4!e1?(#F11!7!WF-U0_A)xk_9-*@jgs$)MdE~B!Mkxhw9Y~kbQ z9DI5nKr0+Jmo5`u?96wObc~6<)L-?b-tz32V>vncav=)4-2i)4Jw3f| zNg`1Y$V~W#eO9PMdUQym19j8TMi=H>kT#Hgqo;@CYd6E|1It0j4dYp<^X738JIsKf zU!7$pTSW?VKv^>VF(zU;{~xudoR^=O%k*<&qewSEu`GskQcv6gD{|_5pU%eC#>V0RXQOA+B!$(wY0tv>XCrrQvSfyW-P(r9 zSnlNVYLJ)n?em{McTc)iH1;;1S^(+dnX1!td${Mpq*iD3$&tlo==X0z2tTb&Fwkk`bE`xBQ>r~IpPrmDj z`(?Q~4dT=4PbP^N(u!f+xnsyIEOkLWeY|Hd{zRo@@pI02ts#e7ELVY(QHW}zN6p0E zu&QP=zD`_sq>7XgOkPB6LQrB38KFTZ|1b6a?l!ikiL}jHkMH1F(u+9L-W-3U@*eH%9&3=@uMhW!*Uj0>W59O)v9R8%KxV zQ*Y2LlF!_1m6NkS<|noMvAE{GiE?tjL5Gl?lH4zy-1>5RV<4qHxLeBYLr^LIJ#34l z^Ml5tU+rWZeLbzz1e>XD2Qz`yy< z8GoqWtFgzD?PQqal!=jiZ~MMQ>IwIL2ySP??py!ElV!e&ul`fFuX|@YtgXIYT3%YK zSqWrY7FqDSw)NxK#F&tRQ0ZF$;iYrcRk1W&*iR#6F7HnGnL|n-CX_tcI%I-@+72~2ApBai=b_x+i^Z{*@qunnD1sGHwS)+i-RiXtMy zA|txK>MQoAOkF3-xpW@1pWF5&(N88|KJ5*mdo&*|O}OF;6+_VfgT7Kj0ssh7pZ?j| z+hfUmYd@G-=^Y$JxCDGZIXh=V2NzfGS6LS1HdgFK|MTsJrmMGY4(e@S@tADb;ltKg_nG}>*2lUeHb;-i{Nv}(!-Es4VBI>1eP?|#vXRm9QLW-hTzoggL*pV= zw7_2FdyP(c_K0oQm!O{i&LogCU+n$UlHXkwjeU8qr6F-14W#V^b&L z7pWyWJ1i5_3fqMM6#B;R#+QjujuLcM7JU#knkjP|#6<$`Iek@(;%&q?iw z!cux$*6dxII%_oYqyY5H9F;%+ud&9CDkm#YjN)&7$OV6UVoW>HA(Yl6Yya$^vwcHB zIdvZ80-fqV(253b0P>>q-5~_1>&4rtGI?sMYENZB{>ayc)o_~%HxBKz=G^4Kdh&T= zrx+J*hQC5it#Hx}PM&NFSy|D~rbF2%1k?gS#myNc1d-;7BT0m7G)znnSc&^34yeId zzzz$rcL_6+N|)g}Ici&f(R(Ad(wt1E?vO}MM8siVZpu?^%c?=u#0?Dm>CTdr$@ows zv+W;^&V6(ltA%yB;8iY{nysova~p|146^LMQ%XvAQFI!1`5|y^pLpby71DOge0=-I zTC7I{AcIq7_E_tlo_ifRb=7edDpIZS8x3|AYgBY9r-@dAw?EEH>oex%sN`h6D`-sZ{S>Q{{k+i=taX&# z>afwsyneoxNJe>aL^QzC;z!Ifv>wrxXCL{hYa*lGw}27+Jr@*HAUb}FpQeZ_kZ1zFFPC_zL zd(kJhVPP*9uxeyRHI!sjio_R0#gI-FBQ_RUt|2sWp{Z1 zBZ?5wIf!#~dXleJ5oyF}Hyjz2(s3|3`qPAu*XEaOZwWmk<2B%=&etgM5g;VIl9R0u zJ!g*=as|jpe^FeQef8?qKjCpr2XOOl=T=%(XaP9Zz-mB6Es)zMWRl9;w-j5s9CyFQ zOYQR(3Cg#TPZ#`r6V?sf%#S_Lsj_i?Cz*Ut~^ja zie!3%Zu5&jhb;g8&Pi=pG@ul6wy&jTf;qCi`4|M48N5s?S^Yw+RqVsd#Jp5ldHKhW zwEIeS8KvV;LV!r(Jd^m$WL)XRUz#f8-}p=)YrkmzUGTwHIAI^AwMnyn0APNtx>n z50iQIiXBjbxuKOIzDXPsf))GAAi?y_NS=Bbw2mYEq)v}Xtola`J913uK4zeixDyh7 zZofq0dOx5h`YZy+#=`r?HH@W?ad_Ao2aD)^Qx z7CWyRIo`XD9bk{i@Y!@~MmPx?n{R2V{nvwt7h9x#Accs7V_uqn_a_lNTM`y^i|YZ) zq*}J_<#2s2r|2dD8JR*Vip+#HyFxP(=QBl@xU_x9(f{Aaj{N5Tkc%(SChV8n?8ziaC 
z|0N4(SS)jRj;dM7k1=(=!#R`EJroNTfI^D#5$?MyE&r(nXeJ|!)M{yI0bGa`39jiX z=4Ae}!VHlxbsG|p*VADa6*dOyldnORwHAO#lj{(Xzs zMxBhU>=j2RCv;iSD;M9m^wHdS_1gkid(0+OkOC)+RPM5@ib_hteVGznvs`PuLVj9S zN~@x(33vZp6EF}9Au&&G$Zyiyj>L$F*;!k2ZET!{!AuKlzWm=;v2$_ZLXe`J zJW>!9#=uG6v}K1>D!(hf!bVlZf#qt=|C_qT1ZVTwsz>|e&dP-UPyEKdVZt}q4FLhe zd=9Hmzqq)St(WNNKDoVcqqMfxj{DP91az7KI&jM+;E=QyU2wMGj>E<@u*($5mKGMCE=j#@zEfp6O)hHB z-Y>p`E9Us8n(?*Q-iZ3{#w=aNfa1aWbkUEBV3<>TM-R_kCeMBFen#}wcr#!8cF~~j z#`F#(W)m{Ezk*C-?m@2y8xzx83AcS-AbF7wA#YwD$bd|MNUCrs}-M|*OaK6kT7YTWb~sExm*J0Sf_tTa5oP)>bomL&!a;S zU|ce3N5UJG;hXdx8{olltaYC#H~=7Mj`HO9lSO`!@>9v8wY28 zr(ddbaxxK;-;&V1E^6esKGj_hfa#-01o9b?O-*;!8%``6j#u+Ujr{SbPhJhwEiPMstmh{EE|rDCC?8H`jW+#oEHqB6&^Wpji@^AEq5Be2aN1XsP0o&FDkX^pKzCskV4avjF7HHJ$>gsxQ2ezpu zY*S~TS%KII1H78BzY{1Z)PHBg+m_h}YaNC@VVOP(t0Du{h|oYqGiPB028(BtJOYGB z$G&`g+G!TA3EGmVC&TEZ`arBG&Rgwd(5;qTTpR&F6))5?<6@wJ@uKJ8=(hO=L;swg zZ+}|rx*lW(oVKD+D>!98^bp$kBT`dUJUt~RD_Dl~%!b-r`buK)9a20M$TCHDK_d(tr3 z9dGXPC1K;UG4V#s^o#e(tc{M|lPxECE6vT=J5pt(gXl|@+87t|I(H8W4vxQNu2#T& zcVnjBWhE;XiaO?;8c%4vP9|repI^D`hluCt?Ua-ht`}dTVfT0L!nH3m@D1eBmB8Q)r2eI_OfIj@&u8_JDZz`CgU=Z%GWb>HLI}`GWa8I-=M+waC*bD6EPnL z60;^3=3(pF>cj4IYNW;#t?>5t*1A|HrFmEtobe=hdpqvvS8#N6OytrUnzx-M1?|zZ zK&<bJ3Ei3?z?yI zL@IANWlDQ`dX`O!xNI{E&$>gLo22?3nei|5u_Yt3Bo_s$7v|2`$0QhNE)9}o#MkdW zqtQ$ZU2bDW&qwPz*i37CmHGXTa(XRAVl10P!RK8^TSodD@8=j~Fatsu4?EJs4F2S) z6H`$=U)wm@M;dIw=XJZYfq6$Lc8NNHUb2-D0K%pN}`g6f%qm^4JO{YIbn-4a7u ziLVpEIxk0}SC;%sGOxsL+e&N?_-%`GjZ4($z9+Dhzeg39B)Hrs=Dhqlhce#;xia6Sck?&7;oq_nhsfboE<_Uo$D;oZdYXA=Hx$9rq$56!Twk9Suv!tHEr zKbDrh|M-#d0qc_j4>;_{l@K|1+%k9WAi(W`JORqHoj@Sd3Zs>Py!@j7)>c?4tR*B= z7m}G_Nd54ay{F{Ftq-IQH@Q1Bw0+#tX=Gf`S!?lPx3MM8Rb|fGT_!1m0|BXG`{HC( z9JaY>jSNytecscgr+YSk zwwlu%fEFx~8`D-(^J`8ES43{p`!MyLxiqo4DgGb(!#%GweMaCB)OO zHED3*KBnvDLQ8r>TicqWXf?@ce}Hr(gUXDRZk6gcZ}rFbwBBBZ%$6xBSMVu(28P~R z4h(>=u?bzD#MIOwjg3<2>FHq?+{skPI)oK>x@Z>{YDxFF7*6%lR|AX|LR6q`q2BJ3 zS`Csi4s1D6w{8(-M z6SeW;Y-6It;A%rdLxmstXs3mcLDi&YjV;T;lpX%W#KaX0j8#9RI;+9814zNF;(dI) zlCd$XS{_O5w6dAm4LP|B%@VV(IjI1@av@CJ_7RVgQdJfQG8WKsl9DMcT5Ixgo!l7S znDoy*)6J`TP%@xc+2|Fuw5FvU;-#kc_GGBmDd%Iyd@DGjUwK@Ts;Am(nq6*b-!x8Q zWE2_e+IzYtvR;Tm_ZgJeryVC3lZB_6nwp=h0u{%8FDwk_+aeLOqwU}JaKJ`i0|}?v zW)m7ds2;7265%Opv_xU6j>*Z|nM$5Ba;-`W3ViX+JISuF_=KbF0o&Gvr4!KbDtyJy5Pa?3K0t#@>Pv zZdv8%#N~nR4OJAwCwTWv9EGal@cx#6l{bx~c|Iy)NjG;N9MIZcSK#9sqy?-L%yH;> z0WTPtYt*;D8@{MIz+)TlS(8!<2*$$m4*7b|vF5h0roWXOdh4shjrruKqpnx77>dOA zaXS0YRU_lflEzj?@-(X~X;`BIBO(YCe3~!xTbTLz2e+y=D$d<>0Jq3|35OXVeFiW} z$VK$GYIa6<0vyLd@LOhFmS`$1CR%|0&)UHOax4jOJkrkYkcyZ*pI|4LmaeZ~^=Il_ zl0+SXXeC?;VCR_^)ypd?Viq+V-vEE|CqQ%+##d6rUHGBm5gE^cJ_SGvJhsIVbsZz{ z(ZcX|TUw;Q8Qayr=FzT>=%n*X0v}bEm%Okhti1f?9vvZ~mCo}lsA|cKe}1S8e_(*} zGeqHIDU12*o5N33K(XJIm-m~u63H9U%6kC%P=@uPS#d*Ko2+B~iDlI5BL(^GsCWUY zsnpZ`NkclZ#X~1&XXj*o&`^iF8)LfsdPoiE!QUMw4f6r01tr5k4KdM3cGzV=0?1Au z_8U;U$WOWOKiM2)sP+F5_-4sD+V-a~b8uh*Ar3Mc@46*}j6EW+ef!`L)~ds0ygpUq z@ZmNwY?qa9mw`F_>;DpFzm*1maL^rUEtfi;TN`J;Tm^LTpH@5L%4L|Qh1)QLbyM+Gk45FRu5$Bcu1fSPi0l3+xdA3j`P@NL%B z1xBg|alCKf)}ltN58|3~pXE_sRB9DK+wZceOuX;XAZ1rJ7K9p0Wp8YXFh;Mf>acP!jDpRPds{_YX?|6y9 zn#59=1w__}E1e;X7MshC=1!f1>IkF{4eUB&!zZg2mNvBorK%AweotmSmD6H5;dnDf zF0Kc37Vz4(55Hq#MHX(#J2@_{hUON&x?+MXB-3u|F%DJnI!ADnJ-enU)ff9iGz?^pb|ZNE&=FLfb~@%GKp&Qerl zTfKk(Z>D-)sQ%HI<|Kb$}P zMb=2q!t!o#@NvV%xtrye6;{Ogc5EY}*r<35F{?;A3mQ zcmWw-F(mkPw3K@F)829mkezNpFuF3Bg$zdV@$tpQ#RGza=g$siujbOGeli6>GdebQ zCvo=F4@h;107Lp_EWo4?wgA9on}v|@(Rgc}%MQ$da6n)nu$kgHySShrl`}*DE@yjV z@e*!AV49&u*7ab$_dZX38)s3Sw5BEna&?%Es7s(S2?#_%l*JF<0h_`{j*Cmq>kYGx zRtztUI6r`j5O0%Ij}J>JZei=wwLtm`K9wHm*AQxv_d!9}(~n+E4P+=Bt@iXQO*hJ2 
zVpz!ZR>C$I{LxyEUu}ViKQr}{C9mN~D$8su{&KitDJ6g$%5$*)xAZ76?$@m81JmSGCxm+7Ci3)+*4hwtfSG z|N8aoYQfS%cNAh5Jd5_FN;n*g*7Hygs;e~`Jtd}Vor)?hhCe?+AS;r!`axb)Q)Cnr znkI?0wPKs8NA&q><^^%uW=-BGUm`&P15%0vt6%u$8!1(sW_ZkuUVq)G>n21;N}XdW^yYo1_#LTy8HXBtW*8{6{0#P|L$w?)kL@J4}|`s zUgUSX{1nMl1%5u#VXHGXIeAC+at@Z;?&?UeQO^gP)nSh9ogEHt?n0-S;Dk;SW8=!h zDO5DHqvecnYdgDnu%X6qz}FrtacNl@#H|*ajehc0g6ezN^my&L#{<`pik{F^0v7I< z0MM&rwe_w>K?&h9Px~i|p+9WB@xl{oF5mh4Uj?^xOQSUlRutaTf;-=JKn-t&TM zXBdF16b0Ts(%cUzJa16luvo>xfdnU)ymq<|;>0*HCvGG$qW2F@4Ouxkluhtj--DS~ zRVAGDJh@5C`hcE?CtT9w7*PxdkSc8d+Ulh1oS29QLSP6{Ex5I-y{9cNl|A@qkfh)P zpi;hnsJlCm<^G571sNc27tiy`kgK%LY05R< zzdwQ^-@{%j&(>0-9_qd)Ji8Hqx#DG|+l8JJY!U#5Rdm+X=t031=>ASz8B0T+_yplj1#r}5X{ z-$^3Pkvy^5Q6-Rxh%9^v4pX6?oSfX95#K0-&M+ew5z?Bktp#@*G6EEUMCUG!QZGI` zOd)}Yoz#m5va(kS3kzY7gMADy_Be5rmX>w~P_yQz9~CcUXxI@A0#a+;OQ~LcuvvqO zig@61umt|d_!wSw^q#!nVqxw2FMz#1R#(S?KEhAz?ErBdL9H^fMj8tTr}dSfeu)Os z&{;K}34!%06qfLlqSN`hTgx;tt~wu9|Rl!2Egs15-lYMi>x&YKK(2B66g1i z&zy68ka>+kY;XtBUx@O}f#zL{E}8s`r~Go96#Uf!rR#Y|Cnxh88#-&V;7qy>$ZFkX zV*W8RF{Lj={na$K@-oktDU{eNZ)Rpz&~U%k3k|fE;IUX)dQCU?9)N?w)m8xaWM*fN zYVyDINLUnLzmlv2-mJmr_SFUHUSDYlo*;msS+6_Tn4SG)00Z*=ffbryb+}rCf2jjA zclJynmjzyM#fQuyQxE`nAf?^M9aHWClI`u+Z}E0&*xK5b^{ow$=?GVE9G;(9NI4gG zi_*F-r(+NkQBY6-GcB5vlaskTV$<+d$0rE^20q)1j)bfvYoaxZ>Z2NC5+U@$CTu$T-C43O$z;kq1M;d^Lff#_c= zPsN}@YfT*cB=}^5?exnU``2z8=IhvfsX6(d0zUrvV_ac0T@{`wFYoRoKuIG-^+J2o z!NK9QHuIy;pWwXiM^&ZQj~yT^=5*UF|-vvs))-T$~$2dFszhrVM{+>oGv? zsNkR)_(VF%N?;g(KR|9ueIG9&K7K_}a{TYUvB^5Ok)uuXuV1L%MFRl?6BAp;JD<0V z1w_T>$KI&nv2*h{)g7VSGj}IP7{-2hY`cZ$mEN)MM?|*Q-rwJk4?clHDI-7sdqDLu z%8Ef6us~etJa!2?!)VM)r=TjLq^5SPw*PZ^G~MBm9^YvW=b9exoN7k+5Vh1bY_q*R zSvMELzrRNeQ#&&9L#8OZBNL=jIk_x~&w*q|AvIe^RyG4G5GL*P>`d%MsytR;9x{Z; z8wnaQV-XN2lMD1;9l)V2EY#4}E+)s-!1cQpt6fcP$<4i6z0k^59w_{*SJ?fy{m<%A z@I{f;b$K;4QdmF^`Y)3|1q1~#GBf*vm?YL%kVAhQo{&*(zHbiv#uE2%a7war#N=-+ zBuPG0wdBs;-4*pnSmjYkoXpv>c6e>GozAptN;%j%oqXc9ms_RsPCYkEv>AJ%V!n4cn?bN?e|2Tn8DwI&psrC_X>~X0ImwT z?-wj6aCT%{gU1Qw3)=4^-B@($>ZlJ{xiHWg3P4$*MI(G8&5fb2jq(RIcbhXUt@Uqx*DN#|&WParyl&=lg-m+2tw|&wx!Y!8bwUd)6rt8I4zrK_-LBCuekz|vBnH#LBZYJ91Qia* zm1>WK-QcqXct`QqLX~|fp-!*ZCF@;9&htLR#VI18h(4KgUs=v50?<+B*4KUWqKKy- zX6Fj8wOcJ`*?po_9T*!T{zR!1`SPi(qY~EVckjYN40;K#!#SkPz@Q2^FuTZQZa6s? 
zx&2*!13#YPJ;o({q82rSKL52|V?Yk~Ohxrl7a!NCal%hd)$7*wS8dSsgv!X|a!}-j zo~y{W+j!PJH$R!K7TrP^rPs%oJas0fTt`UXpb zdJ>E{Sbk%mwSlidk`oh>W$C_cBhe8NcfeTH=jmOkttI}ot{gCt|MT7p-n%HzTGJnc z51GL>I5?;QC#u4-1t%M5(q%L>;(XYU%l2>IKXDDT`N^oMsTFt>Dl7ZO^*)XM;M4H* z?|bQ+q?W?uw^bkAG1E(*#~E8)Xanl!Ht1m>7chR17?$t|I_3fcj%Ui70j~Kxzz|bd z;ClOMND&WuR#p-_`YSifT8wq9AJng-@mBXSeyL;1G^WikFTjlp?w@HsJ&i9&TcXJ9*VFfO4RE3a2x z|0ONiy?e{?sVe)QpB%n)dXZS3;oHW7EC(z#R4Rz!|HjRYySuNp_(&`NxFpO6qQw5* z--~EO^3{!^G{wAO zYSk$73fsB2_?Q5gHf(KGzbsvNWhcU!|BSef>t)?|WQetct-g0GnG_b5D!=rPrw;o1 z?)@pSeBb1D^DIHy4W3Cte;E(N3_^Eep?3Fx!x|)SBRPMc9?b6ao*re=h_nQA1T+8n zPb~mX1EBKvg2U9MQ6a%}4Pt^TO`Rh#d~e=>fNt>GwmVmU*`80Mx2$$fB_SaxvHmFy z6!V310ru9AQt;hd4>mb?i7ZF4#}rBog`=#Kfug?q+^DB3-^A!nBxhD#rmxh+ZyxCX z;)9F`DGSRNcWHgVJj0F{eE=^aCJ#vc@Fmib=Paqne zUs5>@LNTCwhD<8SAS*s0UB`8CXb_evS>UZ$3ppIr+=f@lF!sve@K3^7_t}`?KoFqt z#(8v${}*(w-Wtj;uLXL`)8gmCom9?nbL`9RDM6%&Xfo>GPEW7c#P@Fpm&@y@sCdpT zjs22*7^|!Z;0o5K1OfdAx@qf-HD<2WwE}bcYJ4Ih@c;*+U$$~4Oy}^`R`Wg}R{#2J zcx-5BAP16gY~Thxq#ZIMCV~k;yLt^l@O$NRBvdTRxwweXP6Kq~kBZ#IjGB&;=8r{ZKLE0prFFCu z2#epp-D6|2?sPqidZzu&>DEjg_(q_1}cMkg$ml7wp!oK`a<(j z8~6q;`BN5_24$6;>N@Sak&b#56J{(-xf^oG4H5aFOT^{-nDGwN%|DvmSb_(n)DM7`ec{bbSY3a(;4A^+}Ka4XAggK*7Ufrw}7`Oh?=@7LY9b z)O0%@_X#Rm`sR*7`ltRNd(kA*SKbZ1EO*cM@B%ov{k~h8T%z2s?r0<=M4WUeYh)`o z<9mAE9vbgT@xzENQoIuU62Em_0yCWa?f2CL$vr__=0c+Jam@C30e1B~Rk~l*PyVvJ zMTy15@C|HNe0(GQLysmq9Mxwhz`e4^KDaXFgDF);p_*k>PuT}!S-aoZ1QaWyL$dd?$k}4ca?)u9CM^hXt`yR@sMFt>S(hj-@B2ik zsyVXsS|DQi6MI1hOUF~Sk2e@wNFEHw^?u?@WGE_9D1YE*V=Ln0=3I7-@&%hk%|HPE zTNEStj7bAeV}XvRPq(*mkz13}o-pPa=IC(Eo}iID)7KFBuC+D7oJYDz+I_0T!=@@v zGKcP3Rp__31@gWUWIvp1wM=~;K@dKb6m|Qs1!uh4h6ydOf#^9`q3*AH8tki28H#bx zzfW3pjhTT286UCBCP}~H4fOrl#xA92{(w~-(`N5_N@+IhCqV~XMx<$e)#aLy8-OW@ zf$_yPK_D~~raNG_OVN*L0h=K=R9@RGdAE6G4PS~fdSDut+0~gyAvT8TOFnhNobIcq z&AjC^HI>a~?cIJD2yborJMh-LCFFWUf#w=Gp%1<+8b?#$!h}qQ`XZ=>^KeIp8Os;3 z+emS6 ztf@p$5PJH2Ei6QxURjSxt*#{H!*a}ht}up zfFOtjBU`8GQCJIvU7gv>+?54aYKKk4;Rhz~#K z`Xm(pv(?>+5 zF_Xh$4UjI?K)_Sy1$JfwgK5`zg}Oi8opAc$o@GOU@BE8?PhCC|j%(Dd-oi%?2ezmfioUHc#f;l23bgCm>Z9Jvw$ z5hek5WVLi~-jkcF^`d1Iu6ZPQy}e%)FSSZ)x|hQBK~-fD-}S5>Omf^V(VJ z8(Q6@${mht4L)&lZtQNQpZdhSY11Vew{$|kPFT=``vE(q?#&k0STmqB{XHEG(8fsG@EO z)`4wjCSjybdxCwE*%_vq9&(HUhPIfmg@)~UgLNiTMGzd|f{Ny8LHcwKrC^X{g?unaelO?NYuo>gd485r~>cm@bJ;EmWwuOWn?UZdK)e z*ATiT6JP4;KIY9e_gnaVd*$0Th#Po!W57HM^xr9dg$AmT23O138NIoH;RjF=w|52A zRm`nS&7>zHnF>=28X8`ud>G6~SxgxpSJo+dTBKliKlr)Id@qWhoxO~c#6wwKdep4P zaR~{!pGMLar$ojq*kKk*m)VQfk!yljKNwvj-M@+J8TiQO%qyfVO#QlkDPnoNR>Q!T z9p-^EzPdyb{Rb(DhdG3CkJmV!M#j1KywwokNot4iM;Ma7kSdmzlScjc^{e0Staj;I znn0-}|7UZPg*m1t==u4dvDC3~7}VGq-uI+oI-SS&8*ojm85{{1h8;0v3Vtb>uI zdi(ypV`gvL(yR)?nJf@<*wls?Ogv@uAMZYS^vJuY@$@=EoJbM&rbCA=Z(4CN!KQl4 zj~_U=3VmsrWhvQ84F2DZJ7c(yuf3KQ|K5zbo##fG_y|+)s-DjyuDi*VSSB}ae3_n& zF18NA!(A1RcxCsbt5EQnT1_aajhhsq0loR(NnatcO!>FZRhTLT-TwCOWb}OHGw8k< zSMPcYEiiI@HL5m)nEKEr^WBp&Mcm-S&PaSvnSRQTn+oL(c0bq+OV^^6^eQbgTM00A zk8DID?M#5YUy6I53Rm6t!khul7|S&cS$*yEpYA!$@=?;)m{}Z7v|?&RZXd1WBulOf zyAYj zHWRL7(!UkUcgI~56}biBvn_~0g!G8uvtWbEd^r%vgwK;Brw`9S^dWkm=f3Dm_boX< z-iztsGC_Ub2(Sf|9r?z@QPGVt*cik9J#nLW6)Z5$O^p?hm%^RwaNsPWBRJ`rPj^ziLr z9KifB1IVxT)L@Xlg(pO7b1T@|@?Trt<-%}bnUT3{fq+MghrPLid}H{yP@`e}^Ky*a zxBs8t?sg9bAs%J@%kcbsE?-|?U5K2xFjeWg-gfG?gqjx{+7b+#-NFIa*fO;OvSMk^ z*y(SP)0a#^mF&{3~fee2-_s;sS9j_fJ z8DfTn$k^K64pxC%K@o2WBva`*2yKpzHkWo&1#xA;Rj@p=|E5uqJLy27NHE~d$-&VK zDST+_((W-cH;3OLUn$g@KZ8*9{2cO{Ft6}Vf@zRPM<&Iqc>kG!WwW}^EQEM*P^@M# zQQA%-?;EC~r+O*QPcqaDcV*Gw@ZxEx<;6$bnPZdxlmDFl!_{A3+qFskFE9FmobIy^ z@$rFBfP-r7OGs?h=PgYV=oiHJ?-inLcYFel|E$If8s7b>_~j?9V)R|EYXb;VAqw43 
zMe?4WXiyf&5Zj}PtVyOMbjHVGT#bTrw;lD`+7kK6L`kWSqlU@8PL7S}-g*nH#LE7|<8fRvBNWfxCSVa@YEj?b7Ok~IeW}zJYr&0R~sclMC(bSV#Rrvi`Y~#7>s`~{;$@)GAgTZTX)f& zN=k!*gn)#AGzf^KARrymAdPf)BT6?SAt~Km(v5VAbcf`9KivDCv&Y!?oO|xLYvAw? zeBWAg&G(&iKJ$6z`(+Tr_yBwe)khQ>kDHX-^Ch;cwyxz-4u|-(MV#z+tCjr!5>==; zNS&#MqLfTGqvE52@{WEv+`Vx4ArE!=fiEIs?BNo=y)TH=P{Fh7gMKBC${1o+GbMz} zJ3Nf|#ysi3rU>~lJGx8=Swfi=F(xu#62sp}t0C13P+{TZ*YfoRg|%;wjlG{Vq!Qu$ zv2o&34-R7FaoD^M3DaQ+k7U-7NZi>=N={Bfu?(3@-Mp?Vai{q}pIn;Lh&d4#wKPih z`LJ~>$48U3B1 zwv{emzvO=Vh5#YHpx1NTB=_YmGzQrIyQgv_(qJ1D%le63e?8x%sHz$W=u=em^e6!6 z5|NR?3L5UpuW+%}jFn@(Opd1-T%Vn8|3qkJkAdwA$PViALcy3lp`Jfl@12PK z?;6m0_NCyRyV7jt#G8iVlEs9D{-m@mM;yjomSqW+2=42XsbbS#?kDjjg&7*p88CMR zi#`#wd@0-;KT6JenECaIJ^`@Nm?+BmY(R}AL&^F|y-RngE0E!3#0 zeb4Y11HB9EILMZdurh~r0XakAMR?o6D?SiEO#G}PVogB5Vt|0!tneg04Hqi4cEVwh zmy_!RPG4&Ii}?n>hlxFkiZlZNcqPt9Mv=fZIY~i~NdGAxENTbaA~;(SB|70C=`9wR zKY$PMQv2-ZPfbb(whD>#7;h9bGF7VIx%s2$>8t0*>rp`*N_HA`JsS;A)sFl@a~j( z>ZY8_k@3p|drOq%YfNQJluNY@Q_H~7($=Grvx<||(xk$=;(U=qQzaZo}-sdI+OyIr;5=X;j|x zdAXktWg{zx=Pr3YGl<`0$JK)7|0_}x+}UFHjm>MP+De2259ItxU-yFJ^2)XiwI%1N z%W3S>QkwfX15(KACc*hxdzp(@Bm?atu;9V>-F)(i8|WlMWE`ep@(l?2Q^Cxb(QzpF zib_P8d7E7l=`Sb(lngKA(y}l^cZMtLrX?D*=CgLjX4X>POHPiJxo_-Vmv^_NW0rlW zOX-#M`kMA%*n+&z;6)~;sEOu}^lO$0?ujgLCs0HJ4`{ZQ81<7z#8Pe2r^SM6_`LH7 zL{r$tEUf7vc;(ADYl7;Nw2QmS z1_ig!|86f=ahSBQh=Lqt_b385xK=M%i@tzl1VG(~%(#k_`ya!pD^D+1Ag}tlI-AAF zLC3nAS)E(-$7k~xy7qJ#0~h7v`eiKzHHzJJTc*R9zwq$ zlTX13m8dQ3Jw?O#Ah3Zmd`F|gVsi@%JAk(;5a4-Jg(K^Sc5Dfqoqg%rFqwhS9-S2ZgRxEH?CqI{b;rxcU6s$Z|Vi4qzVyv8_^bm)sp1Ym1%}?Dt1qnn<2Nexz zHXkVqOuNu9Ul!**IWeIRDCM~!VP0=nyL9?culn}F@iABzTTDk??a}xoK}qHDlw_HZ zMYM%J&AwKyZeNhh^E$Tna2IC+d4)5`Z7Gf3U9dtFLyfibRxgyoP;IT;EM)&542+5n+LCia}m6fW>p69gBB~cn@&!K*0$$Y0XhZB z`rAHR3RnHli0gDka%MtO1s*8vX@{5e#89g+>J4vLuU$DOyWYBMUR zZ*t_8>Qr_7zn0*hxql}*EMdEHzj0HO{Ux}gVFezmbp1eQDfMijE#ACHq_5tYf!D2C zb!JGjdJ*;cHg_?P5jcje`Gd(Fc~uJ%9l`gq ziiv*d@g5_3crXasqY5@QN@-3XFa)0`Sp^8wi2^HA;Rf?*(yti{wH+1~Q=QgsQDLF` z6f2;<)TJ7;Ti5RiWnG?jXtLK>Fmm)mf-fZ=E;BaxGogwL=!xw&H@MR6s3~l4rh`I( z)g8MFjjR`Kqt&}Y+%$B&Xrg)dKQ=>Q4IXg2PCJySN&ILYPrG<3D)q#z&I+r-NU;X$ z%P=6^s@$_Iq*g$Rf|OGPcL=&P6Y{DDn2=Eba~MK2tl3!xN9c$ghk+t zNw8L(V@73VtUrmq7MMKb7ih3Hb1VL%;f%&)mIw7A{yDn8LKvFla$(hS(X$2d-|fLg zCo6<3t>m}o&zG(0@j}>__D_y{r82x21t~Z@Z|n-LCTF4o{c9Q|M!p+XuT*&Ix=rhM ziz@FEpi^UEcTQ~)Emo>l=^g&~j7ishc9mnb+~J{N$+Kr!=R+CT zl&TNubO*AwykZGg>r3z50-Dqni-X{e^UMy<=?|6ish#*nqwp;{M6lbTL%2Nz)i*u# z`)z@g%l+BC`&s)5}+O(!WpMt;RsdRWSul5~rX1%ZD(tKj^ z2hMuweh>Z3Tyh!f-~q?gyCM75GS%0PUj%N+`ebE!E^cqqCdr-w0U%6Z zhdb_~lzK73pM1$5lX15e*%Z~ST6(QK!T_5Z9X1*QeBv+Ue_Nmws3{) zU0Ry8dDixdJ0RGofg)Qn60CeA)U>A^mZZJS0GQ-9#ed>?X_=!mCUen~vX6&qeFJEc z$7^v~y-7cMZNLYAkHVFCTY=wGm7R65{kCdd8Xm#=&x{z)vC!?7y2^(Gnnpk&YtgdW z^jq-lW+0!@v&ik-5GbbE2pCt$rE%7WRl`{9p@m40 zE?|fOJ{b+*y}}Y{P=be!h7N9DyL)^{`*WR@HC!-haLeH-EOuZxc?ja7_2SRb6=PIr#EOFr(2rRTjXFDj1#8|^Bwp(tUM2%P9N7o}v$qDw` zBq<%vtL;uB*p+e|&(AkL%$rsf6y*YdQ-wjZ7c5VBXm#)UOIys9_^&O5Mk@hqhp>of zIFHw+dvfp@xGRKD+RHEZtezG8&=3uF*qYEANal?Y^1DC=j&=`=;01pkEN=)GFL;8y1i-bP0-fOPexu}DTa@L$6+C~DodOe8<1$Xt^nkqKN_D$gd~%dPt95-=Y`paX*cf0G-x$G1E)*v;m|3!Fcjwd zo$l)aW}ssYs9i8=2~#MtyCOtDT%A4XBxG6qCl_E60m>|HAUqsW_(UNvy=I>_aLKI} z4j%f$lctLsY*nFo^7W&n=WiFr#yA%*CzPmPzJwslH*W@0heqJDBz!l^jH9nd2;J)b zJd}FF*yp^hSS%0#PWH7~a6c0nQSZ%cqs-tugXEuN!hLz2hs*Jv|C3%R3{wN;jd%bk zK=c8mMYVqq!8$A!Mv__(x()Et1uT2i;#{w(kv^KU6^^E-Pw|;Pk$L>A9F$Pr zOO=sh5o?6{P#?8}rs^$ca=Ua#)J~EecT}TXTuhTOPSIs#xI&fo#bSSkpL(IAEQY${ zCL}ZMB8MT(>9wvlyPEJ%CT~uW@VBK^z@PTf<2#=M0G~oJUkYzUKHuWU5d_d@vQM^mXvm)~`0B z(rTlq-(i*C;UQcyqRFKdkrlGu3Ic-bX(8WEcCBbnV{w&X*Z|7ynwL^}W&eTl4-gX#hhM}mO?T^BumrK*KR{(RkSxH{N9}FWZWuO@Q 
zY3#8sKyeC2b)$nBb1tS7WqeFJxjDE-1c))D&_NadduWhAFcxTD($;mcx%9| z%Zk83x>w<*J2;Ora5*Tt_s<)VIy;wR`;(To)&oN?Peqi}rgC=Zm}^LyBhSi-O*8x| zYvV_K0Pux}g_qEc@=PARy~5@O7sI{O<-}~DoE?$xvTqNQ;h0NxTEKXM)dRb{yxN5V zl5)M_oxS~;gKFNe;i=)T;|2#s1qRf{ekQjIE0L->GbZ1S121RxsO2)g!I1?4jZ~dT zW3T%8Mp2!*=gs$-bx`XFOcip|<>ah@RLjrP{yw5UJN%egpLQMLz4vukj^D7>=RKP| zFDG2WXO1uTy1vwTz}pBzZvOCovZ;P+#q}dghD5%lRv_;MJ9THh&Mhg^yvx0tVMP;W z7KiL;frqcVGo)qODL66yGB&Yv`m{e=8dlRn>%$J9G!mx5r?wcOV|HHhr2fb>q^ynIY?Dx3 zoJI*zc8iKyuY8paN=ZYH7KNkqhJFO0_PO`5*9H7)^`~^rJX;7Hf7g0m-T|{3V(vIB z!@^M18_^PK>G!;`LTqmr{-O|;zkszz&5lVxX|GucOYk|8Y&I63%?aj!jF{}Em$2`4 z$BJEm7Gad5h%|3##HUR|Thv|g_Nx!`)fI*e7~Ux^mllC^d!nPZuXDTR(l<8bg+u9r z`l0D;-fK_e!x3H_rBDQ`#~EUux@B4w9Ape&3+LM#r*?8BK;4Ow&gQXRgkW1Y2wn$y zAA|{>N7Iu5(+orAxeToxZIsleyDaBDvUVimP6|3gM`1P=2?V0N2AV`wjPWmXLPp|Bx zK#qs@Dz@Bp;B!5{Ta_gf%wLQO2@Sg%#bh@5KamN}u8^?7QfCLEC(DVho+1072zt$+ z_ufo=d}Ea7WUxvxA!hV8#oV-IUgw&WgVGImWppGR6~|XZDKO+7{r=qP&d{XtbCt3g z6o-X^M;w@m>dGm6{yy?TVjI!4?k>-{%SdWB%|JFDPAPbS`lcpFV%w$yEP>o{v&r&w zB%P)Fw>sP}6+AcaUwb&RW*QCn$z=P?nZ0ngVSnOwgCw^%jpTatJCUU$Cp-H@ zKVI<>D=QAbqT!(Swe_~9(8pYP>+l8!HfNW6EwfWWlSDbCR1L~exH!H|Lg0YhQZRa5 z#5w#g5{B?71`T2}Y9$Lxa^Fh7&T>20mn>12xmt@5o!-=knt@_HIysp?b6&$RE1}E+ zoG$TTJr^}}{$S5noTlY)tv5QB`#deUsz13y2XZ!)Z^OeMV*-UID(N%qj=)CmY+u|i zXJ2XAB_u-g_(UL+P-gsWu0E81B&o-hglwp;0J;vFua4C0Nflu_>}@b~0Af44F$REo zrsD3VA|3?TQITO`VIobY*;}tBNJjrWWWsvDC`*oi51-v(zWxqSZm`{{)e#l=@Fd?> z9#+nFe6Bw#d7GFK7A6j=`RZTuuH>F%XWM$E0j2Tr#QY~?N>jBCOrSxesPPHIUF+ea zxnww@U<(Cp4n{`w*Lj~hC=CHmmU~%35kKgK>VLAn0O!wW+pJ>+ zfI$-L2udI^pY2)32-s`n9wbir;RaT#j&n5q+MgySn<6d(dSCgtD4p=D3;t9 z&YC*A%-LVtOV!P&_tSjR)fxU^?kkeOne(F@Bp4liQ=){C7$``^Xrf_kLKwHE4K-7H z85M1lORNxC@@B92{peKGhu-dPFX`-7&>!`tQu!5V9xwBu& zge6-vxhZYkt|pTpSU1`gnY{Uyc5M7|nDf)Ii4MLoi>TUb{if6Gy0ev@Y3%k(lH6&R zap}{kb-k2%^l$FgHxtj4?^Clq#U2)+(ToE~b(CtuJpfH-ih%X9UHj9} z-Ce*e)K<@&TkooV2E`H?o5m_r<=YHPokK|e=*gh_BXKR9N>|Hdp^q^D%S~M%Alq%> z_E~Fva<;Z0;1;Q6+2-;x#D~IymRe?d4+gC|eg6ldo64i&qCT@^Td>c+?QSXHUV!<_ zIX*2dlF3k(?GNK~a{pl>&>4Q>c1g{Afy{sP?Y5l@ZRtufDi`H{ck`%#;IP-h9VqwJ zB@Ci9?M~s7%i;pUef*q=udeaHiVJ`6411&T_lb+ed_4)k7a_=0*Qq!M$ugJ0cFx@e zG=P!~LB}@;c^1e!F`=Np7#mhM_bWS`>hNh8uNKTenHt5#vWD#mZSB*0kyRQK{{{** znraHPfyvN_xskiv|i6gZ~O_ZH&D2#g1p|`-&6Ow3OEW622py z-jP3F?SXB{c+nr>U+O{tvH^fNVa~kA0|I)v{l(1;FbM)gUYO&7;1t(%i+NGY`1k$H z|HS+Qe^%UxU}b_H4a5voJ_537E{Tox*si3=_}V?7K&h{J=60I6{jmD zp;(N9@Yg9>N4z!NybBY6nL8z$&XJ0MnlP?-IBf@HqG4Jk&@1h8opV&gqP3QT751}( zE?hxpe8Jnu+wCLs4i;Q5;xCxhy~Y-!6E4@&Cqq=h4pLzp3zw zUZHxa{SF0FrxKC$3s#MX@g z?ZEBj!Z{EG=v^lBPT8!;c6*p&s9SKigXf$2ljlBd3{{`TnJ>}eQW*o%Ln4IhH?4E76JTbGiOOBeJWC=d_eBzVqypxkh?5?Ey~1Q--s_|F-MZCZKw zHczQhsJWv)1jA1t2qY7*+!ElLp<=-6+Y*O}s;M7CruTfP2)~o-%a1nwed+*jKUH%^ zCIm?8P0Rb<{{$4BByg0FhJ&A6J_mG^RCmBJ7c{i3n%Nt4bi2M%0E!gsn~kPn1ve5< zJ#=2UBm?c|YU3Y5h)S=A#odGDb^`fa+~`sH=g%b?g(bg_tKEL%HiFSk0P*)>rfaXb zStVei#tq(|`!-{<{khPDk;%7T-@7ss;L;oiV8A}MP6l)joO^}Z(0_K6w!;z5$rHAZLS-@vpiz0Kr#Ll=DJ4G|H>oFoM63OM$v-hX!YbqX%j>E& zfc;dCu8$EA@TSskc}Tf?R@t4-o$+aU=q<#>SN`(wlT994pUh`{62M3ehmqz6|C80y zJEL;qM6Lg)ofE)?sw~9^6VagVg}SP?NdVAsz$&L`4jv709hlS$6KV!(OFb*@mD%+K zDRrl3@U$yxupm!r57fyo^uqCcn&TZKD^f(-UZ+G##@*_PH=F@t2OuP5&``sHGuYZ3 zHwU9H|hd$+IL_q7;<$;d%R^@Ezp^`HhPU zpCE6RNchBNWj%mce#ntqUvJazi&$E+h!$NiZ}x-9Pv{sJaGnnf;P*(c1V6eY&NmK7 z?U`o&3muGs?lBsaUYKw8A$|T}T8Ih2U+^bG1!f+qocd}fsW#8X6I*6VCJoZ3Yp z24LK(uKNa^$LCxZbt#M0JY&hvC*Ri`6&2TR=EJ!bc81Bi$r_?|bpbMcJK_47V1IE(mff*WL)2+op5gp^@eDY>b(81P5V>SQHuOz5>r+s(FJR39(c5k+=f6Z&#l2KwXz>cv;*Hf{%X^txsyCTD73It;)OY8-nV-i)6E z`90fusbsWMFH6xH;L*2Q&Qw(;Q5Y@krEQ)#0`!O9?U;d?kG#=s`6W12;nhAtWfM3l 
diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst
deleted file mode 100644
index 2460efe8..00000000
--- a/doc/source/architecture.rst
+++ /dev/null
@@ -1,243 +0,0 @@
-.. _architecture:
-
-=====================
- System Architecture
-=====================
-
-.. index::
-   single: agent; architecture
-   double: compute agent; architecture
-   double: collector; architecture
-   double: data store; architecture
-   double: database; architecture
-   double: API; architecture
-
-High-Level Architecture
-=======================
-
-.. The source for the following diagram can be found at: https://docs.google.com/presentation/d/1XiOiaq9zI_DIpxY1tlkysg9VAEw2r8aYob0bjG71pNg/edit?usp=sharing
-
-.. figure:: ./ceilo-arch.png
-   :width: 100%
-   :align: center
-   :alt: Architecture summary
-
-   An overall summary of Ceilometer's logical architecture.
-
-Each of Ceilometer's services is designed to scale horizontally. Additional
-workers and nodes can be added depending on the expected load. Ceilometer
-offers four core services, the data agents designed to work independently from
-collection, but also designed to work together as a complete solution:
-
-1. polling agent - daemon designed to poll OpenStack services and build Meters.
-2. notification agent - daemon designed to listen to notifications on the
-   message queue, convert them to Events and Samples, and apply pipeline
-   actions.
-3. (optional) collector - daemon designed to gather and record event and
-   metering data created by notification and polling agents (if using Gnocchi
-   or full-fidelity storage).
-4. (optional) api - service to query and view data recorded by the collector
-   in the internal full-fidelity database (if enabled).
-
-As Ceilometer has grown to capture more data, it became apparent that data
-storage would need to be optimised. To address this, Gnocchi_ (resource
-metering as a service) was developed to capture the data in a time series
-database to optimise storage and querying. Gnocchi is intended to replace the
-existing metering database interface.
-
-.. _Gnocchi: http://docs.openstack.org/developer/gnocchi/
-
-.. figure:: ./ceilo-gnocchi-arch.png
-   :width: 100%
-   :align: center
-   :alt: Ceilometer+Gnocchi Architecture summary
-
-   An overall summary of Ceilometer+Gnocchi's logical architecture.
-
-
-Gathering the data
-==================
-
-How is data collected?
-----------------------
-
-.. figure:: ./1-agents.png
-   :width: 100%
-   :align: center
-   :alt: Collectors and agents
-
-   This is a representation of how the collectors and agents gather data from
-   multiple sources.
-
-The Ceilometer project created two methods to collect data:
-
-1. :term:`Bus listener agent`, which takes events generated on the
-   notification bus and transforms them into Ceilometer samples. This
-   is **the preferred method** of data collection. If you are working on an
-   OpenStack-related project and are using the Oslo library, you are kindly
-   invited to come and talk to one of the project members to learn how you
-   could quickly add instrumentation for your project.
-2. :term:`Polling agents`, which is the less preferred method, will poll
-   some API or other tool to collect information at a regular interval.
-   The polling approach is less preferred due to the load it can impose
-   on the API services.
-
-The first method is supported by the ceilometer-notification agent, which
-monitors the message queues for notifications. Polling agents can be
-configured either to poll the local hypervisor or remote APIs (public REST
-APIs exposed by services and host-level SNMP/IPMI daemons).
-
-Notification Agents: Listening for data
----------------------------------------
-
-.. index::
-   double: notifications; architecture
-
-.. figure:: ./2-1-collection-notification.png
-   :width: 100%
-   :align: center
-   :alt: Notification agents
-
-   Notification agents consuming messages from services.
-
-The heart of the system is the notification daemon (agent-notification),
-which monitors the message bus for data being provided by other
-OpenStack components such as Nova, Glance, Cinder, Neutron, Swift, Keystone,
-and Heat, as well as Ceilometer internal communication.
-
-The notification daemon loads one or more *listener* plugins, using the
-namespace ``ceilometer.notification``. Each plugin can listen to any topics,
-but by default it will listen to ``notifications.info``. The listeners grab
-messages off the defined topics and redistribute them to the appropriate
-plugins (endpoints) to be processed into Events and Samples.
-
-Sample-oriented plugins provide a method to list the event types they're
-interested in and a callback for processing messages accordingly. The
-registered name of the callback is used to enable or disable it using the
-pipeline of the notification daemon. The incoming messages are filtered based
-on their event type value before being passed to the callback so the plugin
-only receives events it has expressed an interest in seeing. For example, a
-callback asking for ``compute.instance.create.end`` events under
-``ceilometer.compute.notifications`` would be invoked for those notification
-events on the ``nova`` exchange using the ``notifications.info`` topic. Event
-matching can also work using wildcards, e.g. ``compute.instance.*``.
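That contract boils down to a declared list of event-type patterns plus a
callback. A minimal self-contained sketch of the idea (the class, method
names, and sample dict below are invented for illustration and are not
Ceilometer's actual plugin base)::

    # Illustrative event-type-filtered endpoint; not Ceilometer's real API.
    import fnmatch


    class InstanceCreateEndpoint(object):
        """Handles only the notification events it declares an interest in."""

        # Wildcards are allowed, e.g. 'compute.instance.*'.
        event_types = ['compute.instance.create.end']

        def match(self, event_type):
            return any(fnmatch.fnmatch(event_type, pattern)
                       for pattern in self.event_types)

        def info(self, publisher_id, event_type, payload):
            # Only act on matching events; turn the payload into a sample.
            if self.match(event_type):
                return {'name': 'instance', 'volume': 1,
                        'resource_id': payload.get('instance_id')}


    if __name__ == '__main__':
        endpoint = InstanceCreateEndpoint()
        print(endpoint.info('compute.node1', 'compute.instance.create.end',
                            {'instance_id': 'abc-123'}))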
-
-.. _polling:
-
-Polling Agents: Asking for data
--------------------------------
-
-.. index::
-   double: polling; architecture
-
-.. figure:: ./2-2-collection-poll.png
-   :width: 100%
-   :align: center
-   :alt: Polling agents
-
-   Polling agents querying services for data.
-
-Polling for compute resources is handled by a polling agent running
-on the compute node (where communication with the hypervisor is more
-efficient), often referred to as the compute-agent. Polling via
-service APIs for non-compute resources is handled by an agent running
-on a cloud controller node, often referred to as the central-agent.
-A single agent can fulfill both roles in an all-in-one deployment.
-Conversely, multiple instances of an agent may be deployed, in
-which case the workload is shared. The polling agent
-daemon is configured to run one or more *pollster* plugins using the
-``ceilometer.poll.compute`` and/or ``ceilometer.poll.central`` namespaces.
-
-The agents periodically ask each pollster for instances of
-``Sample`` objects. The frequency of polling is controlled via the pipeline
-configuration. See :ref:`Pipeline-Configuration` for details.
-The agent framework then passes the samples to the notification agent for
-processing.
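In other words, a pollster is just something the agent can periodically ask
for ``Sample`` instances. A rough sketch, with the same caveat that
``MemoryPollster`` and the simplified ``Sample`` tuple here are illustrative
rather than the project's real pollster base class::

    # Illustrative pollster; the real interface lives in the agent framework.
    import collections
    import datetime

    Sample = collections.namedtuple(
        'Sample',
        ['name', 'type', 'unit', 'volume', 'resource_id', 'timestamp'])


    class MemoryPollster(object):
        """Asked periodically by the polling agent for Sample instances."""

        def get_samples(self, manager, cache, resources):
            for instance_id in resources:
                # A real pollster would query a hypervisor inspector here.
                usage = cache.get(instance_id, 0)
                yield Sample(name='memory.usage', type='gauge', unit='MB',
                             volume=usage, resource_id=instance_id,
                             timestamp=datetime.datetime.utcnow().isoformat())


    if __name__ == '__main__':
        pollster = MemoryPollster()
        for sample in pollster.get_samples(None, {'vm-1': 512}, ['vm-1']):
            print(sample)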
-
-
-Processing the data
-===================
-
-.. _multi-publisher:
-
-Pipeline Manager
-----------------
-
-.. figure:: ./3-Pipeline.png
-   :width: 100%
-   :align: center
-   :alt: Ceilometer pipeline
-
-   The assembly of components making the Ceilometer pipeline.
-
-Ceilometer offers the ability to take data gathered by the agents, manipulate
-it, and publish it in various combinations via multiple pipelines. This
-functionality is handled by the notification agents.
-
-Transforming the data
----------------------
-
-.. figure:: ./4-Transformer.png
-   :width: 100%
-   :align: center
-   :alt: Transformer example
-
-   Example of aggregation of multiple cpu time usage samples in a single
-   cpu percentage sample.
-
-The data gathered from the polling and notification agents contains a wealth
-of information and, if combined with historical or temporal context, can be
-used to derive even more. Ceilometer offers various transformers which can be
-used to manipulate data in the pipeline.
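As a worked example of the transformation pictured above, turning two
cumulative cpu-time samples into a cpu-utilisation percentage is a
rate-of-change calculation. A minimal sketch (the function name and the
sample layout are assumptions made for the example, not the transformer's
actual interface)::

    # Derive a cpu percentage from two cumulative cpu-time samples
    # (volume in nanoseconds, timestamp in seconds), as a rate of change.


    def cpu_util(prev, curr):
        elapsed = curr['timestamp'] - prev['timestamp']   # wall-clock seconds
        used = (curr['volume'] - prev['volume']) / 1e9    # cpu seconds
        return 100.0 * used / elapsed


    if __name__ == '__main__':
        prev = {'volume': 10 * 1e9, 'timestamp': 0}
        curr = {'volume': 40 * 1e9, 'timestamp': 60}  # 30 cpu-s over 60 s
        print(cpu_util(prev, curr))                   # -> 50.0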
-
-Publishing the data
--------------------
-
-.. figure:: ./5-multi-publish.png
-   :width: 100%
-   :align: center
-   :alt: Multi-publish
-
-   This figure shows how a sample can be published to multiple destinations.
-
-Currently, processed data can be published using four different transports:
-notifier, a notification based publisher which pushes samples to a message
-queue which can be consumed by the collector or an external system; udp,
-which publishes samples using UDP packets; http, which targets a REST
-interface; and kafka, which publishes data to a Kafka message queue to be
-consumed by any system that supports Kafka.
-
-
-Storing the data
-================
-
-Collector Service
------------------
-
-The collector daemon gathers the processed event and metering data captured
-by the notification and polling agents. It validates the incoming data and,
-if the signature is valid, writes the messages to a declared target:
-database, file, gnocchi or http.
-
-More details on database and Gnocchi targets can be found in the install
-guide.
-
-
-Accessing the data
-==================
-
-API Service
------------
-
-If the collected data from polling and notification agents is stored in
-Ceilometer's database(s) (see the section :ref:`choosing_db_backend`), a REST
-API is available to access the collected data rather than accessing the
-underlying database directly.
-
-.. figure:: ./2-accessmodel.png
-   :width: 100%
-   :align: center
-   :alt: data access model
-
-   This is a representation of how to access data stored by Ceilometer.
-
-Moreover, end users can also
-:ref:`send their own application specific data ` into the
-database through the REST API for various use cases.
-
-.. _send their own application centric data: ./webapi/v2.html#user-defined-data
diff --git a/doc/source/ceilo-arch.png b/doc/source/ceilo-arch.png
deleted file mode 100644
index 7a3b42508c3667613bc786fdeaab250db91fcf5b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
z-O=gkgNWUty4^>=z1j0aY?H3{!8>d;RA4QJBiV9KdIa5e$*H{ek&#An6xPZIg^fBw zo_HU05DA{o`-A1%Kj@;0PDoJSEq8Qs@&KDa%wtQg=X*gL85xNPHYP{+3pFeATFvgG zqoeN6dhJEV$D26_$R)7-m|lvs&fB){mL9>O{fR&z52xM(UL8POl z6XIBOdZK7$z*eB&U9X8SCyC?$ONKaA+AqAdt=r*7INoz=>WhUys_Ki?WaNoPpSsCG zPUfbSI4uUD>?Ej(+?M{~O1z$ul2R*hJB@1lXN+n^#)zpOcGCk>6yg;SDEGbb)cF9y z=-+5=L7F{OaC3E5<(^JvTaC;ng5jX3_-J8a0XRJg2x{8e+7IyXSiSmIr`o?s85`4p z$kUg?7ad6H83g=$etTOMY<$hxtQR7vb#!!?ND&GM2p|DBeVZ=q4J-p|H#u7E2hLMEZgYOQRgBovY=!iQ^mHnD zdHD*Lmtcn2QebiKj~Lb(0% z??`D8Ct&}GDCoQ~9H6G>^901Z(1^;Slj4n_`4)fVbC8=D3k2-v zk&uAn=1(5VX7D0HrooJk+l-1qqQz}8*u4+xMELlvz(D$eGgW(EaBPhiF7@-w8G~gM zyj;&Y2h zms=_WP9Nq@ihFC41U(!BOl)m!u}L_-{QUW|?BcVm>(10W1A}LxU$JvJf>lPK1vV5Y zsHn*9cQbHhN!*}3S@%;z)nuPLPM&x)@E;P7odZ)$> z!21R7u8*uI%L2gufGCJ}dA6qja*5B?X(g{J*hcHIPpIfY!NCTjzZAa&2J)<%y+mLl z!>>6x^xfUvH1zapmuSDXK(t1hUs*A6^~qN)WY959IzQ8dK^${s<>e#ZrJLk}g=IHr zy+0?kTH5fPDW`BY6CJ|Y%v{ecudF47sr_wSMPUGX>b>JNJu zO`>Isfw@aidSohYfqAKDytUk)Z>g@SDYcsuc64-PBgS4{St&Ieqy!-lv2!rQw$-}r zAyBt^qc@*cmF>#;!4kQMZwd%LV3ZxGc!K(_hvtZ^^_-D0w4&k_h-PHh<$r?$I{+e- zU|!|m@uAUz?Ck7Fv5hRX-@kuvrD@KV$mtW(9L-Z(@xYku$x{ zcCU}p?~HFQPY7u}gaPIOFxeaqWyTXae%Ro31{{`~m!E%ee*S%&SWR4hJ|iC=pRt)4 z%=}dZfQTTU8UV_`q*EJ%jEeS7SzZ70 z1NC)470FmtOw41b?TqG*Ru&pA`Gf0Ibp;n$r~3NK&HQP{UgiobScGb!<^sSQmX>*w zK2w!e=&#?teH*^H4*(;F_qlzV`yz(4w6yzHVHHCvS?;sVx%X>5uOK5JMfU6t73t{4 zf_UyT1|ri(fxY@@kE7Lt9FZH07e29d0BFCemyQ;MSJ|mWNr;JQ7DQ4@;6Hr$8o*vV z0Ow*MWse^}26W&Jurr-n=iFnBt^Q;lHFYRhvH7ZQt6~$N|J83Naapamh1UvpLr!qN}UBJyRE7UoYso zHyfUllk+Yk2A~2Q#X!Xjab)wiroa@%e^yxq2L&Z8%+EL9-R|8jZf^FZ33<6+A9QJy z8r}~M4hCi(34jln7l;Xh2kivbGqs4Qp5}LZQKput*W2A44AOYO@86Ovy7iI(>w!fw zJ6P=ac){}&WN%~;fxrrP0>mRJE&a2kJ(u4rTZmzL`X^0chr0ty@jV50qfW+uHIitZs;8F_yp00610BEoMsy)+_$C36MI$3PmO-_YP;t)HtqSQcFZsm~$)v^RU@s0~i9N$$_IB*#!IsL9B3ac3$4dNvC`M{AcUsY=ajkcvHE^bd~jFtCn4ZiL0UM zi`3&&kcgPPVo$;6k=4sh<+4Ps`wQ<5ds#K=-5s**W;sZNJYSWY4HnrK_+2ki@p&8? 
zPu>-4lz#FV~BVxF|06;ljAzcGx;C(u277INBf>4=( zz&HVi9}NiX{QBXE z#Wn^HfRwJL>k$B0%*JPcEt!Yon6)HjWQ-QugK>lW{z6+mg^uEf5A6WMmsC}mAs7W6-Qw!1xVt+qMDS2D>nZ|J zGQbxQt_Aj15)e#48}8k|?*O6|yTx#(CH;J5JBYlm!5vnxq7`5}Aj$$b3jx8cv9a;X zmoIFleF@T31ea}8x6Q!sMKv`wC-WCUIPM4hN2A7(7Gxa28;1drHyuck0@1DJ?)FMb zR+dIsm{LTfsjMtpHt{VmpbLO-gTupVSy}Nwei08NZaLl<>7^xR7%4YX0JnCx24E!u zCJAtXZ-7+sR{1TCN0I00b}$e#K*r7i&}J;cxK8Wm=p~{)ZeQeZQX(SUj-o& z4KamU*BxnCkq*c^uMmOk@0Iz<-QE39rIi|pIHdn3q`xcN!xW2yTb=mw7-~wn&#=H9pX@WJwF4>7$`J9IBL5{^A8dNPU8NiC= zL&G*_ADV8Ieo#pV({TWR(z17bvYfub^CZu?f!0P#?<5xjEreW-s=y)KNT_y-eR<|7b*9#v;}gGbLmSMULo+|w8LmMMG%>3-TySpcjK$UU;vP9@-VStc&9GNWOmw5_46*fMj zIgL}}NrqW@MizbQ7V_$KVGdUKu3jQAg(f@ccLK(|#yfgsFB8j_u1SDw9N=p@k z3xJQA8G!554ZHBnI1~6>9_K>CSafPXXhsZ~SFEdpb>3W2&tu2{GKVRA(^JUv_*Yp< zXTkrEQ9Qdc5G~;(4Hy4+PsK7dM&OnI{b2p^|E@4voC1uX4#*0C0+!hS7>GsbD|*N) zE1*t+yS^nS%K&@X+b@HEp0lZ_sYwd{{YyYVE_*?O_@QLEiS)kn)_6FAW3{%v z1z_+AK0f{^FK~Epovb&1LPc;pAPXUGh!PxT(n+2t0mKhb2ia8qCjdNv&&90&GlE=t z5Wr(%V%YSXkqDVJGsPfJMwT>(x3$yfYi0p;2d4ITQ22@ofND#&zqdqc{_gJn0pf3} zfSb~X5BFYsdkX?`v=mIA_9sJ2^>=o(Ab^I?xw)aLSHgfXo}Zt;cv}$mvSkEVyKU3W zsdSa{&%bVvSq}i&O=l$O&1n*lNJIm1JHY-;*B-46jsRT*4Tm)IYZ(W3>T)8Nm1^Nx zDzC%yXgWEiFY1wixDc{vtJ{3;G6p(Nim;Dn@LvVj;y^xyw*^5X02uOEj+7XEszj^& z8R6#G#BAS(cYsp44zDUE?F-?Bevu}f;vcGQD1QgfCa?ebtK=sk5)+)mE#90XLe zg^i7FAPicJ0SvjwnM2^KX^%tUh|IoMDuMMXtp z|FPHP#{>jM;LS9Ae2L&5gq9}PIgO$t(Cw%Lo}0?=@?1nD9az#vp_Uw=rlkzP+pMQ5 zFg^l2fndvyTULjz%rAVxFM*caSEN;8{>Z?zfV~gu!U}t-~7!pFs z1F;dv5#`kdfUDTUZK@VlR_-?=_`QR}52v+(_O33*?vbETCj@;L6H9tILetCTA(qeK zxux1WTx!%+)&!vthy&lCuz>;wY5Wrcf;OPn9Gsq_&0nmgtD*8d^5Xi0Sg#<-Bc~m$ zip`|?nmDlJAcLz=SG`8S^KkKlAQ#1d^Ic{l@B)6YVl{**$Vqvlj#kca@*8LfFW=yxmn_U&iVR!s9n6AM*@>}+WGvjYya z$-E)N`!YKoU|>80a5*dtJM;{g6E->T`*3pfwfd#0$c)I>iR%l1!AHM&X6oHntpLf$p_D#J+pl9?|?Z$ruqB9pzxrO5JRBP zb4}3$P7D<3+yk-yovgT@Gc$)*R8)}eD8GN-4AAzdJ4!PMU{-=n0{{SHU}7pQX&?&~ znPdU0WSZKYG)pgZo&ka$Q&Oh;RE0&X>+dgL?ll6r*|PTmHn#EQ@dl1&`ac{rpVNTW`2m(7R1U2UE_>P zt33&Du}_bv#r4^UnP$5Xq6j=RN{~4sivYV=Na>>B0zD%A+0JyLW$*clIi_#CX0lFv zQi9=7X{HWKdq#*+F65G2AkJKo?=jIkRd zXH0?gyD;>Rg5?Ml1~Zd1bE}^rFXV>0%-HB27|Z!ozHF3u47q6E1I z2B`m(u}-yZ#1=7@oV5;QluU8B(i7^ZTQsIb_1-6>kjZXPi|taPI(gaSTKX$Rd6K0a zcJRLzp-UUi(5;;JXAE5cf^rg#_6x#%96eRBb8@n=x;0EALd+CyqvMvWCa9sTXA`pb zsH3vcpS0E{c{7RmxsqMYy$k?x#(MgbxUfD#Z>G(O#;1Pz;QAzDHv?3Z_{u^ln&K)w zK1In{+TYX7lZOjs|F%<1UawD0RMOvL)70-Xoa8b5C*rZ|Ni#11Oqr;(B64+gCENoF zwj_YA7&tg84@ z!~-fSs``e8?%%&l+^K+I4?^k~uqD7LC1queft@tfu31F*ZaN`ZdX`?0(845ZosgK@;O=LF-A(hRM4%Ym}i-FCcc0sjEsB zzbE?F&Ji^`^+W(H0N;Ii$%Sb&OUo4`i5e0XMsu=G7{WP_$1YaN{Ii;{6a&B%!;{T1 z;0Ih>vG=hEesj1r*eFOysi3MB> zNEQA^F;bNk@X{$8piCwT`*>z;#WQJu$YEHyf+z+f$}d}pyb7e2y#M7`xxz>Qu32Yo z0iiP}BEmKXf$qm8d`L48b>}=QEvFL->8TrKv1cK69P#g zE?FyUYp}iDo=&J?%2A2OHZ#1q_`-#VRnwV?;!^>GaBma`QrUBAMxA))#V_wxMkRbe zO?xXt7*XpbVH~S0tl=;;GKvMIRQvr+$^4|k4Bm)88Vw>GH&!@begCU1AyLt%qwVr@y7@2_bMuLzRH`T2-uGbmV@?j*}E3Z@QxE-05x1W+pMIXbT zhgbpK$5cUzlF^8b7{vOPXW}{HnLh%)e3_41|2os*&Gq#K^xb0^2&w&P92mn)4Q_UB zNNKv>Wu3iZnx1;dyi|4R;pw;GVVNV<)ipUoIp>@6$UyW86dV+hyQ+%j`x@mkQ-oy)#u6Nljx%(s8jr~xh;-0q zLv(}$qiqWxH$mN4jm-8whPI@R3*T1k&8E-)s3I73b8qM@T@)vM3!xPH4TOpzVTAln z`sz2sR`j=Tucb9fJ7NX^4N*~!k{}HmQhz(UB8wj(7Hv?LF*p9__-7kGKD-WREuD_UkvqC6`+ZTyZb-7>5j+glUwfecdv_|jUQ28RP5YLbkChJ%g76G zUS>Z6Sre{(6BV&zI+>66UP(L1P2UYo_MG+eC3f5VMqp`m!sz)D`Sr$JyLchp!EfBS zrUN$Ry9VjU(G3Apv>7zmtx5z;1XDR7zO9LJGcd=Gh|=3kNUF=V+-UB13#P9SB(?JB( z0Iv5B8&)oxLuw!gTesSr}tT1%F*0;Hgm=KQGNIDIi3|N zuqKBd{QT@a?z@jo+Y#_3tU9z5(iH5UF=9u?`zG)kvhxFy*RVH68fOdEN(1Bub9o9R zj)ThKr$R{f!nPt;&qdazgN2q&%oc}hac*bYstsDH@10U=ZurKvd%GNHjnqC_)>+61 
zyo~SIEM@MUB)WLHA(dnA?79%lYAC(+xGw$1s&W49wy$H^aEY6J`}~4TpsxQqk15)Y ze;euXb=8->M_%X>zz68Qhw~r{p|k5RW)yhrSqxB4btawNw)X{9^78WY{B{`}cI@MW zu1K7U`%14~=sB=gkC@Dxf1MNP+I~?x+pzi*89s*=~eOM z?nB14n@=G4 z+i+}SK6k|~HJ|I;CWxE5OJ|)p+=I24Gik-eddE@BHT7;ZkR@CmMmYwjEosG<@9Nzt z+j@Q6t~er}tNjcNQ^)5+fnH%mPPWa5 z@}2hVx4db$I;32y5L?}zulHH?nO@!&ZWp|g-<$cp;gei_@uO+PZ@Q1a%e$c?ug$L1 z(;JTIdkM4K!gRz!=B;sJzVq1IaCBi{VR*{8!ld!ijIY#&b}8K#vMbuTk)#9B*79Q8 z@VlkDOXEx{SS}}rUsL-@bF%Mtp^@YFX=rJPJj?H5H&-YBEi zt!ouuc%i;0bf>2g)5r6A7?j+|5@u>j@>GUP^sG^imuz*u*CMSsgSR``zP%6-5;%+x zm5<3ih|ut;LP9ZXi2YLfLv^3692-DcBj(eZSn`GrV9;>Ok+Uao6@l_%XR!$!H$mf} zCAAFG#`kt>BT9+g2iBLz?~u@DZg_w2C}dT-c@-IFKdcjqq7bPcjAPZyFq&I%cUjlW zPbg;awNmB@v@+z{gkKytdNBC!CIv{qUt=CQ$@n!hd!QRhvHS0)yqhaMTUZ?<%ovh9 z?QkNWtxganBXl~B&*DBqt!7`W<&PLB!cI9R2650$en&%(rCV7K)?gwdQc|6AxE*x6 zOCdgcCnzK_6JBm*wTNfEkmGlQhsXo%r?JP48*Y{Svt`R~aEywfJ|$V5?mnO@PR-0* zD1HTMZuX$fhbv8zQ|j>E)%m=LMbyPyz2$QY?3^=eq~^WaU`#w4^{~>+gg;u@9EP1v z?&!{KLz7P+t`sDK7Z{f_r(SHNe3wUfCywOVJcf^#jS2o(u~i8k;!XS9l!DO3^9#L7 z@R|dOXbBR#<79buIh-6_XZnc48(*HK(0pemZRqXF*I>I*Gy(OvDa=~a=&aovuuW{uav@^HvUT~Xq@95;Jsi;fxsuihP}K|P{_ z(VwE&jaLrSKGzQR&YhEE;keIGNjSG~kXYI~MmOCpN5m^Pu9PZA^<@M{{hCR9_kQ zQE8t&=?J)woqTBq;v)LsX(|ZBEg3cs{pm$o{>cNHSe&UG1yJd3iN10R1H=4dvp-SRMXj)NnLCiKJTj4QL-ipn zG-xypq;7p+s@>AUy|XImWhePO$m>p94J%MSH$#PuL#^yY=8U!9bqHKpJPTpnJ`HJ8 z^!Y5EE;KmU&7x&`IRtb0L(FN?HyRvg8SXh>Tz%!LKiC7wZ)maW5mrphG^#`Di?Bz4 zrW`^F!=AY&Xtm%7IbMK=nx%`Niw(8RRpSR{p1NbMjlu>vEH~wA*u@uW0qI_1m#WE= zXVkk)L`x?WM=phM;3(FvH5Tn{*$TIvE62w7ji{ZD7<_`AcC(Jf>vlCX<$v#o+~vV9 zz9!4;nc*95UGs5;H%y(r>8K8p+}Uto&gIE|))kAEB*2w>+$;E9E0(8h zRkvSDE$udy(e=SvzSC2kBk#+Vu*XP(Q!3uhMK-WT{sSduB7XrRs`TD67^VJ9({13~ zpw5X_flkiLD=`z6EE?4*^gLM^nY$FXj4_az-q=N z5+$2*-4UJB+T*%n{h57nKSdkIG?a~*$3`uk$%jDI;myWYcE4PKH7P%FEGj8whLyK%i$!9;q$cd-U z?O$?57R1~OrGH|#oxs{xzTK~Izea77{0K*p$3L)ma>b_Z3kRnm^VQwaeFuSY`)4d) zu^ia9b#EV`DTsWP^ZF{Bk@HfLENDaV=hmQtkIvbqQbf&Gvc*8bV(VvqKY2MsfO*fZ z%LAGoc=^Y-&C)=-2`U=)U!qLwkvXf*@yRNHOgmon@v?3<8*7(!YrC`4`$-%}qY^jg zpVe&X0DPxtYwEZuJ8IDDGI4u%I<-?x^n7x;%ij9V?)-`~KA7tigI*2weocIL?*PC> zMnl5z#&d95zJrj>`p0% zMnN=90r42VNx|DmVeRRuSLS^@Vl#P3f9Co}AdfiHjM~H2e)HZjn&H{ff-MF}-EgUk z^}V+GYe2ae-;*^1h=IJ1NAl+>tf4kW`NKE0XK9#N($PUM$kW1|wNaW=OE~(IB^S`| z>t9vH1DbZ@^omnb_Rr3$dM*L_A9-Z{gb21cU_$xLvHvc;!S<$DWjopBWbi~gOngcw zHNS+RsJRl)*sWlB&(H@^S9&?}aPe5|2;=#PBqXeGbhi8>S%6XWZt^kh52>u zHHrSDHkSOhN+BK)^;kZpOB9lcdb*X^TXbI_pM@X8IF5TZck0mHZTwHO!op&8linY{m@_MB5l?TWdMD}hYL(L%3 z-k16rse=(dUr|eV4mr!W{K6+&e~Lh!S$4x+B0QW9;EzkEj3(}VL`yG#(gKp#EosnE z-}SpLMXJvBW}Sb39Ia+|EGuQ=*Kf?;f$b|2q^5y}%^i1UL?ms=4sihO|1u-=PEh3h zu+B1?0XU3C0*X{uY#VRb@`4P>&W*YSdG!t(D~ ztHZIed*jf2xCO&WtpE~}tTfOuTDLugifHBX_9jTkr}mj8{4S9Fb69^=$!1lm36zbn z*jx8JHvk&Q7dn@w_dQQ$q zc<#_G$@Rh!dq?(f-kVPWBEs(oxHvz1aX0ZL z;M{V+9Zl9VX25`FVRz)y&(GQ%tnE&QaA^e1es@t*v4h{Ue|4KPG++h*+@tbc8Tno9 zwkbSQPUoI<^CcKiHe>-@8$7j}{rU8a<35b`mnFf`>SEQ8P3WorWINTg^g6Th2B#-K zFe!lcn^%spwlx$lH8*Mn-KpO}*eCH!-%Q^$?V>6*(j8(1B6PKBM zIlgRR60%BCH!8ncTvGq&&`;%o&~mQ}38Z^?4GjZbT7O}Lbslk5tko$v{W1aSFyT=u zxw*NZ$n_Dl;OO#Lo_zZw_2XDE&p0FU+A5k`g~nd9=JogDNB)fWDw$^nx=_7~gg4CX zKYe#ZJ>1@Oui%2;iM87KK^j)~=m+BH{2j5jC1shL1}ihZ7Qr5^d!IDfyz~*2-3*Kp zRPEGfdq_^+*Bzt1onkFA>Vx7>;&FEb@IR(k+?~iN)14no$7`FI?K1nR|k4dPsqt-mBfVu3I)l|0-G{8m(sjLJN#}a0FXKv zr}svwi*{UnAai%&_1*QzIB^Ni{$+vlHV|-CT1%;WiZVTS`JdcgyM2}QEt@VQqZ4ec z#Y;yRmBW5caZg;;sqNsXpRdGdm0e`4V_YSbASzW1WqwxI9C;BLGc6SEieb{zJ6E>a zAQI{EER8u-*QEESTE8vaW?#Ki=!v3%ZTVNhben4wlB;v=ai4Q0dT8-7fZwKDY}S|C z{9W}m=Qh7;T}XJ7Cf3KcbuQSw)O@uMNSBsdgXZquIp|=GHM|@xcCzafxoMo_DISBl z6lbqopQ&|8+ij-mkZ}8OZwaUL52iP1?@sU^lZ%}_tAbs)TakKI?)Wovb!J`Axh#Hi 
z&nR7dk+O9=2cHR2AbYT39@z2(<)%nsc0$8ONAz>wosW|Hg^|4Er)Z5 zdmkcQ@WtC(?7Ru2M6F(k2*gcrhN-y+P z;r#;y7U|BxRZ58WuY-up*wv@DxQQqljLHo@a*K513q`eRT2`{e2V@RK)XsRK!pwXL z{^^Hu8r{;g#W4P&{n<(J!GYEl$Z;L6aF!p_B7V$WRfY4%X*ii_d~XM`oSP-Bicg)_ zxq@!1`ZIwhD(5Kdg3wNN^js z7N-%XPP*}E3#7>rN9hpE;@RTrA*c3KB!pRztPRdDj(-L6z$4$i@+WkMMg0fkbf=ah z=vtOxGDr4=yap_}PBWFyc$HFJ*e5-rt#26dq5|HnQ z)H)Mp`bf8Ap5EVmGFG z#tEI-;#V2ryytgD9IXP~&Y~Rl4|&a#)YfX3q*7mt9A&7sb39XvHq7p7#8-OC zUHWQ+AX)t~L9H>BqBMpM4q%oilIZrfdCn@Qj1+i2lh+9{Pz`=|m0)0B%=j%06;XRk zCbsWrE1#ZXj3iV3eN)mfI0=Yo4F-UYaBpdCt?LWMxQB=UI6SeqxcGpBG;#8QGEFUCDnQtL#uV zzxZC9R7g(iA(qE(xq8IpchnRVt}AEl++pw&pxR-#?J!eiDkDY8&feViPC^1=;-D7& zKT?DiPiYe|Xs(m|tBlYQmF=g7%B6*SkA}FktfiQ;iZ2IhZ9VR6#api?9%X;Jl6zht zpa!2UVpf!dI`rub$)>~d;&i#a1 z`qGcJud?aw&W76u3Q=Dfb|od?3-ftAbi!1%VWpTc;tv!E7w^>d5ml4P24^+zdkcKE zhezAxK0_>bdtO#nR%BY5?nd%_A2>0z44Nz5y}YV^L8s(4)+q=8l!^Wde?*zr9>K*~ z{nq`(m%zA)U5zLAgLAX&)-OqkvBN4CF1=7vwRIeDIYip3KtiH>_UzMM<5k3UjCP9W zbcoLu)bep88fe~mV<;~CuJH%&<(Z;?kfD~C1M~3qyLa!#&UZ8u6B7}qWd8j5LpUOz zA102g!#M|Lmo66*lwuOgY4z|312y!_tcl6eS#xWLo$rzV9+&O!BhvFmv|PJQJD}g*-kS7g zWMpiN=9(%G@(Tz=$HjGmX8A$TBGnToAub-3lF~PKd&-mS9<57Qva>mddNcn6{zL-$ z+gUdQ()>(7;POo<=!yBS?;b zZuD$ZvLeKM((}$fz@Q;~Wxz}3M^|O0E-?1Bmfu=e+F`R)e z;2^J@j~uW_Pru)!*!93`pg%zTAwpRn{I^QUpHMIswcz%yZJJC0&3hYrmsl(>J8X;8z~-; z-8}`KzO>FNVh;eb!QOk1pp`Xaf}0tEl{ z(gOXLcIOx6P4PQ+o?*s4y`dQEd_h1oo@`phVrk!{r)R_gl@4smMnsDCH8TAp%-feY z7!Ulm8aJgSWn<=X{ifa6X;t>N)$8&-yCPIG`@R0#dsQQN2Bpkkk+j0MR=xXO-Q2qR z`@^aHt|P&@gT>|L4;eRrngf$tIECBzaW7(Rh`-M8u%4;x7UUtzQ={9rvSn%SOuB2x zQD${{0*DNd?l&F@Kl1WI_l&3yWh8l-GS$#H>;o{4`oStSx%w5Y8b{PihTqgH$Iny; zu27pgt+N5Go zs0YbhlQ%29TcPs%Ah~12a%>_xL%Cp}Wf`2X0jFuQN=p75UWJ~!18?jC9hF*Q0w!w{ z4$t2l81@dcv<&HKna*%Ub@hOb?h^eL6a?xk=UWxsYC`)C`_+wv(2)@UC6L=&2GHC7 zU4s7?b>GmILGZs`fd8rVBTY=k4~m-7b(L?75FrygpDlfv6^tF9-KmL6e7k)r>3Y2{ ztkvip9^n(XH1@-415Y7v*stTo?}pY+$er~1dM14=p313UNNS@#S3xOfOCUq?WG>oC;6ZPl5Bw-Hq-MZDd4F=52-% zw`DN^T)j+4{sfvt{%f7_t1D{aa?wK#(maf~)WQm}0o^l_Gl_IYY2auwSi4GdG04jn zBdcg8wY7{V;%TX-67RJ-!V9~*{r#BjYBDN8FZd?S)uPA1_FVu_nidF%u|K-K=9xkd zj09!yu%o7=nZZ|^`hRrFACL%R(8sG*0KTz)cZr_1BcQBg?K@oUztzZ)8fv_mPU5>q z4g~Smp~?FY=o>_>R@CooF6J?j!+H{*^!B#2fZH2+ZsZ0ur(Wv2<{20j@DFc)Py60B zR#Z^0Qm;H|_^U-%{eNoFY?+g1RoT1$SV$#k`Kv;l;X8pWq@)lg&LjWsuQFQD%vfD3 zWe1u1AT_dmH13KQsL)FEfjrEe>PoVpz8a%7d$syf|COWrQkCf$=gsdeeskFX7lW$7 zg5x9nh;5|rYrpFHi||Ms=#fCRI8len^~(QA+{L}vTlIm#Twi{ck)Qy%^y~26jc!b& z(#hNLQ6IS*5Mnb=WrNee`JRfN?gW2-e~+6Jn3neEoV&4?9r(&SLFWUKqmMemn0X_S zSiLW`KIgct#ZBURYRgy_R{#3emfYCZ(w+EifuOf=i`^PUi?E?*sK1vO&?itjUm4dM zcp;bQRk5Z1V;ZV+rFxkduS9@+ew`bsLaC*Cs8QZ4bi;V+EMW#4ZXymJ8~6A>3B9od zb0M#C{`a_Tft!Kuf*+!RNj?p=8_b&HETAgq5PR-MoByZTkYdQ#g7so2{3uN(4`A;MS(?9*8dwM{xyMQQu300s$ixkl$e9^jB@vo+UZ*1b1ki0;&MIHvWVg9v3eUy?|-bSi=^NEtc6}N z?K&jlFI`9hD)&f0aCw=`JC-6!)|j>S`w-$iA;7PwouHs-APfpXMOQ=aHN#~<@CKEm zqjR$>dk3o#niJQfV}t-p%)Z3OF>u#2>8}+GFZ~MY6rkdW^_Yn8kKgo-s@E*&|57UP zV=w3a7l$gcZD#?@I+9`UsN(=Zo>qG2t*o9zLzL2{TCAShL~GfAI6qj1|y zi2-DWCw%^0o;QoyQ`Z}bd)Hq@@mn(9ki<@qFs2a7vwDS%3>`GJ%2=*oJ`A$6IJ(uM zRBew%wrZD;I-w$ZAfHx7r8$SR=R3K605$&xZP_5^s=j~3*)<01bnn_jj78whC{F<& z%{ND9y&E{D;)#CA>eIH~rF&oh&QDnFqa;XNyOD{3!Cy{w-zX>0L!^|o?#mPI}veub|jP1SvQ6eupNX&}8HLrREQEP@u@8v6}3 ze7Te#{9hVk;6VGa6&mi&6)2;3GC+?0d>zKny^5%T3_%)|He8d58l?M@0?0M*a6rM% zanccRUZ8{01N1YCJkzs7P`=tL-Mckm#``h{uN+8~bb?230!~0?GdODX!v|CrlvRu3 zYvW=b8`-*FZCD);AJX}>Hua-D0&qPV>b@mG_`7aGW3w;P`Z)3c`k+ZGkxrO&oM zEW?`njKPWuHcZn}h$<~{Dn`K_m1YC9h>hQA>luG~IwaM39$-N;B);Qg;xMn$w6~K4 z%T-YCL|IsD{7zM6$y#Hm$2+gE(AXKKTBm&Y@$7iR6vhY|xWFO0-$3b-Zi7)jL}!#e ze7w4LcsE&hwErv5-*0|SK!$_!i*@u9QBmST0egtN=9s6Ss3);$C#Qg%P7O> 
zY1UYy9XQk$T3T6|Wv*f`>aN2OYRLGDDfqMgMq_j#|7 zK1LFl&M)6moG;F-MSf#PX)8`c3EUxfZMxz~v1+i-DV)!~|Lc5RWz_fJ8T5L^LHlZk z>Y1<$v9%f8{syI z@h&*ms9E?>IcmsP<2=1D8l7LR#W3M&;StS3vxzmx%P79KSlao|j3bpaymOzSGi;#y z`5xD6l@RL}2hvJlW=V^s#OrxfN#)wZvX9yg&dDqam5+XLBdNIDQoEqA1_=9B1TA_a z2TTrW$zvcXjaeG&>es#!rQGydVuatjs=;0Mc7obTcLlWPV2j#1U^LDiD13;ud3)O@ zfQ)1WdWx>93f^elNLgRE;WrKFV(*@JY@Ijh%}yT8$&4*z-<=6~A768P*qQp6r$bgN zM8A_)`l1$v8mpq#`W2QhNiqZz-w@_y_X zD$RB~?p3Sv_EJf>@Ykq#;8 zlJ0IPK|#6%q`MmwkZz<)y1V&qp67kP-XHr1%kFZ{edgRVbIo32&)y)t_I4?KP%L$dT%b{8KIv>tpec^8eepy+~SRb^FJODj2OYs?!i1Ok4ayaBT zm!#OxBo$r1zzJ4I(mmQBVjAvp_)~Lb-<&sJT0&0ZOV_ct3TKuWsjl-!DHnbP zcR`7{u0zE4_gTLo#B$2RBjbd`w~#|QIRVkzqhlP8CUIP&D4yM%wv zR3bS-$k4uA{Y!bT!+?tZ)#W!aL}Qmo|AvlF0`~!L_wj$d>xi}~Q0 z?^tnh=xi0*^XJd!DNMmR8-feW{;*YBZaXHb=we4yME)X%*)S%N{|Ud81G~XF4wX8C zrsADgT0GKOKmB3eRkkLvm93TS-CN-kG$A2jJpL^bLS!kO`E~oix*@s-&ZhRzi<>Lz z7f z54msir!ssrEt)L7(<)?zsh)2U1`K#|#K(bJGAz%Lz+q1-|AGG|5Q`)+9u36nhd;}) ztGcm`3~+aNpgS+2P^D?39z4S;trwu}QAF|AY!=KcZyu1$lPy{V*7pu}P)%CXOj>4@ zsp{&^mwMq9tMy6Hq0?fz{X>fn?U0p{LgKNRL(LwUK6rRjtd?C?hU>E6M$b)Bq7K8~ zCO-bR#Z?eo#7}?FgC!#?%dqV}Z^A;#&5fUliKz^zwt>bUd((r57}LzkCKy9Zpb(-| z`oBK+;7k7s>+MKyOx&nX#CDB_Z_)>-$0r+&Vl~iDgakD22+%8~EW2p3nHK;`F)_CA zI+-Q-4p_56k0ibcOdCb#?8|i37AgQL_=b}5Dft!RGjGTE>M**g*w~_xB%tqV-N>l- zD^b*>q=ML%dKHMu$`rAyE7B!#Ua(8y=oAtide-+5oeEv8#TBu`s9*o0p$ccUu_Pty zb~ar76=`N+Vd#f@M%!JUI$dr3rj<4P6}N^{ayN8E#N)jeP{BiTph5^w4ljErf7`u@ z8h7F*iOer3FbtwbSNiWFN~E~N{`Ge>5JR9S-O4te*TaBfH9;XZvacn8J}Wxfp+PP5 zM?pac=!t^?3H@eAwDwAq1wYHX!npT|xeQ+g@oyy+O{%U2xh{7({E8wqW{6k-{L$la(Aeytar8-sgC_EBrpz+z}<(D8Z$TnSE$U-qRljC1_wMH-rL z$bXM4G?WG+N%7BnfrX*A#f1EO*(E;fm3F`D?UglE!bJ*Ff24YYM^+3s0D`LuCN`&= zz6FWnBL)j8^;p{=rZ^MAdTIwXdR9hDF4MpKz{@1o{G@#n7yJ2h$H#q{tTE&fD?{-> zIL)jUpQNK6!?zUt`1SSmvF~cF$#Jdsiujb=$&MziDP$N{lHLaHSB5cESfP|v)(~n` zAOhLLb_<&kRLYfpY!WWv2l9T6K;7GyEHUiQ6$boRI53XK$xsU}`#`G1L;==`!)4!$ zaQBOqr9A0eF*10k-OCAfSu7O^9VW1g5&0;D44|M_Q2ogKT2VyXKK#gv<8djJnfG^(-@jr6yb#>0YZ$-0(~9k0ey8iQ4PXHvry$) z@|W6hkmFCD3^H=fyEs_LS|C!Fubx>}r2iNI&{|!HWEyTFZ}Z1SPz!+=HIk}@ga%;IXcedJSx#eYe0$B{@IgHG+mL?) 
zCRo9a^jZ@8qODPZvaDi+bA$uLx4p`YzW$g`KTr6o(NcY*L=Fy8A@}{R0<*3pmC??* znX^WA^gbh8dwLc6l#9M#yJmI{^`@nIK8p5_)(@MVC&r4*Qfeveiet|@wsDna;Zycw zYQ7b=JSlM>vWJN>S}pV)|LtQd)Wd@YVMI)X6>b!D`(Z6gbqpukL}R$Ii50o~B1s7w z_TU+iz0_Xj-aWfZNoQS(@)o9@Gn%pqC9NFx}oP<3d8TUcyC_*BpSar^SG-r%1CgTuO=>t zt8H#wdfKJ*97(8lt>>^$^EI*%RH7K4T~HxPe^B|oV})b78BcqQ?F(4mi>$>)pSEme zh)Z;Wk$HJVh;K;88DB-2VpRHDmG>G=_@njP%p-1>2!z;BrBGZcsE^kPuG+(avb(u&&a(H%`@)NQgpjA_y>9X~z#;;od~7!HMID zvQuR#R)c-FNl`D#Cu{`j>{- zCTHxaouS^djm=clgd*hq^rB6Y5tT}Utl|J9E{Fh*?Rn%Bv&`15@Rn7^R zv8jgR%&^;GbIGb~Ieh0TKRNT~A7A{$qfcPp9M8CEvT%Xm#jfZ(5|+}j_rvM7xKK?U z1adgN0xxv2-M%pN?yGI56W*9owTTD|!G*!j_U!v|{_@i;U5GF7cg%Bn-mfCf7#E(* zD5lQdl-Bz95b_Tw_cvF~uB+d^ncc7Gc^*tEUABiS92K_icP4-5GGJ;Ya>zAV9(iCq zxhwVXxkhUdwtOaka48~htsiPTL=yH~Veh>Z7WHl_>~-E4FcWx-=L-GVe%xMtamRdn z;x0nE#ZF~5H^M;d;a^a|L}*OOH>BDz{{ccJH)7a^C%jM0yt9o-dVwr#-Pf>p;ky2# zyUc#|*KoCiEmB&d%(vwui7C{8>(_Myku34|yQq{2B3Q4K5yoWfFP>Vb zE(`v0uc;qn4WfZcR5`V|Nn8ioIC2u}Nq;kbY1jEqSLF%+T|%hYq0oiJbBbo(!|nR$ z7p!d#me(z|lcv8i>t7sN1lH=69-KG5GE!-bLJKR0_@Wsp!|z*)&OSZl{d%|W&~$q~ z#h$|9kR3#&r_0<0)YqWl5yiPQb9{XzRm7Xd%@bX3dLtd8X95F*aW^ILD?(lnj(Xk- zMSoQ-Ep($o^1=A>X1%BdYnwnb%Y@%T>Vr@RB?fKg&P`*Ys2WJgwWMNH5((64c0#pt zRW?3ZTyBrMPMr+2(b?=U($bARZ`!GEnNhmk`%c_fLJt5;O zVg0{j3tJtw_xA(~i|$W?-MI$S9NM-VT|D$Ga4g-;#&u(TsZfcUhAhu)^H#qlT&&iz z?RVd`PDg3yZXeyMOc^+;3GuE#z9BFD+O;yiU!xZm8H}Ot(qJJ}RaZxVz+?`_;F_{s zkH?Ul1+rBuw60I@%rKU01kl;n&_GjE#$e2Yv--g?6wg`=&gsdliE4NLsG2&enVDrC z-gA&5zw}3k_=HlEoi!ktGbyq!Uc`&YdJpOowjpk5Yx@0%UFXWveCf*F z_!6(;!g2x5?V3Cl{g^>_@_~=a4bt}Za35&0{Qf#N98K|J8!XT5Qc$)wJ|ktGqVVfn z-ak5eA&99_o`cIzix)#o8IefLU$e`8db30EJ-Rz_7p6K{Wk(x~ONU_4?~|iY5&RcA z5PTCEbs7wH44R$gj~nk*_@D`^lx_K#>5Qs?mUYedd6?k%$RZL3L)h7ij-nQpifmE} z6BbDmrh#ktMD2Q$Z3KRCc}JsnXNpzCLyu|NuW_|*P^9C5NE?FlbEJW}Q6as?4k68_ z`HAU3YT2Jujk8JUTSnPcX-sONYz)(qlAtQS3g7&6w-TYQ8tH6das6j3j{Bm9Mtpgr zGA|z>DpfV*VxghP!|9W%dUp8HQyukxT=yr^h~9hu^|5WUB(;b+9#H2vI(Ip`x<_t0 z>c?cM-?8|Vd~r_TRqS&+L{l{HiT_kZU*?xn>FVmSiYxfk68PsjarB?+1xaq@2mc&> z`Y?A7LtsmE8hZ>e-SPU}dEOlKc8_}GNNxNqK!K-7W~W0kvGppPJ-FTVlK{^S3ZD(- zgXH-1a9zvRpOJ(eAZd_-M6o``wpp@Ope-;O+wS6wo+JT(4bD za(wal_?W$m4^bj6HAuW@7FIL`vRi}bvFd+=@y8%NB;wyZOG%cAnYnW-iKGjjxO;_a zo8Z6EJNhdV6O;TOKm7FYIplFQs9?=YjxVwpR(W_kK)mt#%^L%DV({u+5BP1e`Bv!n z@825#+nO+(+C9TjiieW()Xie%+aRzIVqgU3Q`XO7vRA7P z@kyF?&N`2}>$`+vz6JyAE~lY^WaBMes52JgJ0XlfjoL7c$C|aoj4R|%X9%s^{VfHh zDflQTB&2zrYeK~O0~@Inej8@up2dxhD*xZ94#@CFpFna7CVKk&c8=^zdO@GVe;)X7 z|4kpEP$@{T(0^WrL4B!l{v)hbLR{+T`_H>Shet%jB8lm$baqgo!^`|9V+AY8%KA&L zzMi-{iwYi6g}`S}oz>9L(A~}dIfUVNPjD{vhYnp=<6rEXXYPckN}}N)F|%PCH}o8| z`%ixV4D>-Eg;oCO?hdR|MMWpT2)nV$$~UTZnUBh8Uh;+rDx|`{ep7BSQD76zwU!BI zS}w@GoXJ~$Gvq>TAZyjr+;TnX;QCRHf8jOJ_Zpe(4Wj)E(6Ocn66#Yx|+t^o1}~h)6d8?OlpR zb!Vo$F~G}=<|$x9EtNQ>)^{Wim3644e;45sv|)Q&Jf6;n_%^qH8UaMw-=32)%qP6_ zgH@lY#&7eFY2!d7Zt))O&^P#w#LlKY2V|{QR`*~c!|?McT6{lPaxL?>B`ZnlMLmFN=0>>YMagHq54lCII$hW~g((zT%aaK>J6`Cw zkohcFbd}Qy&XUZM?wnKo(H`>!Ujh3=xgklqc((lOW&Kti9WQzLYknRtdm&?qw6yKx z?}ntIP448l>nYa0vop@^=BmvAfZI-NJ7s&WiG}1>b5F-}2go zy{gXn51SnMPgazvRp=#nxXD4zE1{zWi-u$!+80wTy&2k@(1x)zs`v(1KBXPKR)A^j zjn*G~{%nasfcs^>GQSz|sqNM%8)C&PJyk)mcdeIs_px}cc z>SmAPtj;UOn2_n(&o0!wnVfies0A5Syg$%Ok5u>hhyhtjrMF#`&dRs2Y_F<)V{JLa zT2JxHzFao>fo7Ieclfs)O8Ko|96UUQ4_J1}!;=U~)s^}S=J)KWQfk|Zw^PE4T`svF zQ0^XB)zX`JeC)|u-7ImUh_zz)o)L0koN1K5=RBr#ZSHQEo874p)Tz6^S3kOPI4M81 zO*iarA1j+CH}kjUWjA)fz;Xwh{)_H7 zOOX(;Ll~5+At?FORKK5@{aaPtt-jaxaA$}OuEBVTWM277Nw2u@#UA;~%h_B6jmld( zozyR%vQ1~~A1Wzn)fgpvtFztj?iRMWAXcKT=%QY3M0%c%geARll8xW-9(MwqyT5T5 zlYbZV)IUieH+BY@uT4XR?KY!c~o zV(G|ct9J+~hgYha+31*I=m?w>B^OF2G5+c6&gRW=>rwmJeuI=(w7};809MoRtL-?B 
zk`F1$(lg(Hw2Y0ZQaBv%2$fnIaR-#cTAy(CBq6v42E!6uFH5$^8{nim6DiV~a53!E z$tYwwWxy|hK1q?C^Ld8Db>mG*XP1?^!cDU|zuA7-$`O(CW!g!?#l`Byg~HEByb1_p zY6GWxEF%1HCFDy>t8Ka9(Fsxq_~x(cnxpO%Wd1UV!}=3 z6@T~r2uih;+6&!{>?yfUm;q=7G0C%+expWK7Zo3o|9pR2`3TF6SWy@pAYseo`? z5m<)hz^cAvMopbDW?~kf>;H}vA%Jqk%74rX9@7Isg3|2+T%Jqp5Z91UKvELPrWEu- zClmWdM>gBt*?oJnR39PyS7@CBvdmc^Z3NczIlAMP=i#d5B_#Lo%P!zMSWYXp9 zJ{oO9z*%vpS4*8aAL@OmZkRzzl}CvTR*wmttRZ~sVWi#Ik8PL?7mP>vj5%1maH8Vl zxfm|)Y5v!M?qvHtw=<`YHUikGR@mDY0Y8R3EJ=68~Bjy3t z7tEolbxZ!_)-K7^hS9V(4zxO^*c~PXdq>-9zWCc~CTKkq9-UZv-!~+xjz-lDYL<3B zJ$*YWApXvY#zNldODgZFJ;}L*;&ZXD`5`nrNpKjRbwg3CV-RD=G-?}m#%-Bzcs7kq z%Cq`un5$8czA~?Rb6d(vXv8X^BoBdcj+46g_bt>oZi;PP30drrdh)_)NL~z zDGx#KOIiSA6L7zt)nea^wB9?Oz9|0L;MG?D`AL7OU;@%8g6?aws5JBLX=t-c{-Bm% z8USC8Q5Fa)ZsLU;xRL&W7q)EBf@mwy_u9A zw}&~-ED1QE4sxNmla*(`K?*CjSs5%?zw`W{RA z(Yip)-6w^_#U$bra3m>6aM^quQKP*Zal!3VGi&s?o$JkfL=|m5A<>sP1@~vBH+_Gu zvt`@CGt98|Wuk*UXJxsr8bVKPjNIn(VkMtOn|`1_SaDUPR%oI_w%*v4rI?xMwXAhS z))fVoGUNJEtKT_?9$8f2??-+0{7u3^cYg|#K^6wi6WWUVuo`SWERQ826*N8sbsP#& zEto-+Z^y_jthLomGF=nzyTUqi6gPg<;rLXA^c+ra-aIb^N{(M85 z)}8m&4AT8_zDMq-ZkiY$7j;Z)YS)(oE#h|U1!xe>9;yr>I7o}}>X(q_$Ngw>@4n`8 zouh75J=yN!lhCG0h_lCS^nrH}*w~M5JI6S@RhfR{q_HYP%Jt^Jc*e{6BxL0RC ztG9Dx5?N3W3F2yno_f^HO%a5zZ5b_D@L}@p*^ym|6(|Ido$B%XK zAT%|L!&my}h(LgNHhg$D{7)k!4J)y$^4pnZK9OyL@Pk+RfbLgl|I?7Zdc| zi;+HujIQ+Wh{8?!bhMdfymWyORF9qxhHD)(>)x!BDRsp&pu05vf~%9xlUzUkD=fll zG?96KzEF}a`$m;kJz;!xH9O?FVI2?l!54BQ(6;t#xvPa7f7Ez&o7G;IAn>P?rW0_j zCx>(PuAURlyZJ&$h4W!t0~?3>rCw17w-zeHrIv}#sE^(>WuQG0ru1jh-`rL`tGK** zS}8GVrJ-%{os1ry%F85^L&Eqd8${PgI3gdZ+cvGdUF$oz8Y_v zg^QuSHLdsk>!)Am)Nsci&~rr|F3%-y4W0N!gx?2tZb_(w5cs$sF!x_?H4+u|CEPSf zgl+0lzKVHFK5+5N!4RWZoTXEiDAN6u;O4R~#ptN6;lJRc7tw^=O1YB)X}imIP@}Qd zaOuv0Xj0h0-8BNAu1$*PUdU)~S5qr;n$G22f1jvI&Oatv%RDkUy^zoAqr-_c75Owc zK~x}+@(xU;*p81I{?s8V-CIM*`UFaB^X?w#@DeErlJ@LX3Nq2XZCbI!$bs_c-bvUygb zbg@|k1w1$WeNH6kdlqhNQ(}U$3yDz91Yxl=kG7dG`_HQRQ!ROneLMuQOS8zu{hzBk zA7^jHv+_KFNo_A=-`Uc>Uwcy~7nh(TdVOgrbNsBYi!Q~;B=*Q8oTd(mAswr)*F&Rk zGXgLl$6y0;hh)+bH`hvAB@^8Od}2HCiNoy3gqr!Hu(k&4(TRFkCEEGCB`(*#vz~kE zwN%4~C5f-ItijM!R;aYNuglBQ^{5gjPly;fvqU-*72BJF+J^DXg$-ho@0^@TIkWGO zAEe?MQFz^zx_A-cG$8@0B7kf_Z7O-iF(<@`QIJ@&U7yzgSKo=J$@f_4=N6k=NA7ud z1d>y6r#(a9|3K2`q}@$7?k02S$`&Hx;2(O$^~V6!<(m8bmG0=57@jY0+H<@XwerIg z>|i)&g@~@wm#+RH$Pg!pd??NKtpsJEuGk~P2l>7Q?AEXS9Vb|wic}LOIAWoYmoBpU z!O|J^&CJtVZUWj<-h6~&Ul|N@sHm_;uVvqtIrI`AEB3f~%}!0L%=^mNvcXbbhc&;9 zbX}nod<>mg$Y^SKX-KF_zQ)Dm$!4b^2BCve)TKX#m{x1IO>M~SP1%9=4u&h?JkyKK zH?c-G92s(uNh`mDaJvFV(f92z+-5x-05Vk#3{YQPH)VSc;&^StRfW-DZUhlX$3e0g z?NXnz9c)rUdQKYeqgbmgi`lJj+vJf>aLkit8*O;2!_4+^0gH(EC@U12cIMdhw;S}a z2laC5jh~c59Ygwzo$-{-NFI3@m2{g&qei@jhS(vCPs*F2O05y(=+i?TUNsy`Hnzj7 z%}igYP&Gh<^oq*L%n>t*a} zHiln#vLyu#^Y=xBJdKeS=NR7d0W63$;P!u>MEAcJKgIhd01c1*S@%-(DVQV0f(v|F z?Y&S>6w9x*Sjop$>>U|p)Loc~kwZV=y`{qgiI5){10!BK z6*A{xpZ1m2KSfBjt0-(&6;`2%=NH|IKk`w{cdTsVvjoL{<&Rd^fOH)ys--ez2L?^`hmiWpLtaCE*zm3qUEVP1xY`@p+Bw`?1FlIP zRvTI&L4L{pk^R2_+L}wvV)b-@>mcGO?lEK(AH7*G8W1`z{nLI%5CPT)``W^j8=4Vn zd$TF)3w}IfNo7q2eCR0Kv4)_p`%u;py|DW2@#5AG2n)VcPM`Rt zgy%)b>t-8^Rp&@#nY$0f;i+oxQTqaE6apAlp7{!h9~PgjVmP_WR@vf9>G=!&X8(uc zkOeu;VF3}wGJv_RAHUG7S_|l#o~RWPfCjNg^OQ`ObT(sVZF_HHcb$gJQX{bBgnaf= z?IFK7mcj(cKsPb)@S_hsO6!G_7ldgaBxv{c^>tR5^kt|O!pYKlfXPK74waf+aqza%LK`*lyBuaA&GGoc#IL8*Ua|7YD*%I&?qF#Oay6tSJxLIyX` zH$w(#sS%<|_7g9)+l~G}9>5Ku*mbn zMsz}{#m5uC*VXwy>x=*ntpH`mG`DRItGT+^AaMg3*%<$%w8_8q)Aq3Jk@sNST}wt@ zRu()^xCM}$&IuqXcE1)M`hDSdmJT5#t+nT?oX**+JN7bfEHLtWidQ$3+mo<_=5jl= zaHJn$TEXx-!6+~P=f8ZEhFA~QRMw=Mmfhy!QwxruDO?bJ&^Dtzj)WoAQdgZg8&QNi?4#dO4BOMZbRz*8KL2j>p7n)Z6R~fVNHkztnVU0$(Fw@SdZJ5b 
zTG{X*P(ec25sfYRH9ki@zo0>v`q$tmQ= zXmI}BrN(JBbp&)i;B`6V_T6iPxkHd8j7K961AWD>_9-!g{)idW_%a1>YjrhBN4q%U zP$7vtz7~alt@x1719Ca#y0N*=a3m?C@1;V*f))7n?j+4|&%+a$QRk(%@)6;9#kC4U zb`E1i|K~qcCIlQa-!bgkdXA;h?S!nlI${8V7pj*APgwj+dTHr-fs+d;w@w?587ukR z{#~2B)k%U&q{|4h;tG0%F@-3OjHCGM+oTb4=uV&LG797y=J9ly6jN~UBMtaf)(Vl! zpuY1bmFS~psl7yKb|0yO#6UI6``;ReQZ)SJUw)4(Y8(jQ(HVmKis8kJGbG`OiHVkK z59ByNh>iZz(xehxunpT4otf|};6V<4(x3`;bBC*UH@8Um+Q;ZTHV3Clnseqt?B2=4Z>Kap)wtOLuRk zXuTS+RRm@F?2*mJ>q+2oGC-J9;0ZXjXC6%jn97Mt@91C5?JAILjWx&71rh5;@Nc-6 zU=pxWD>}$bx$_U~jqVi)v>6_lJ4V?pxQLNsY$^-+MVW@rT@6uho9jdKCbDS#7gPj& zBeOyUTG{E<@s;4C`ZE{)pAsa^`r0SK0JBNMqn)a&$iwY05DIU9Q&>_sk*Qp|7Ly$o zx8ik|mh07|QtDsFysv-!+w#`8o>xDY8dEOKo)SmoE)~F;NqprVKSk4b2&Dkrn->D&`m)r|TF- zB~C`v*z;>$O+!aq1d*5CS6L5C2Ja zTzqhYC-)-Bfy%Woe+k;>x6D7NWaQk1E7Qd{xCrD&OS3aY>?B7YeFYIrPWFo=eR>Qd zaZ~ouFir|O6_D++30Tp1hG3MRqO!98i!U#lqk13>!HSec8=L-SCQhOG z)?eiZP1G;D1V_Kb$1g<~k())6PwqJUHRrdRQ14ie7q~`xDnu+p`ylp?qGPXE!AbBr zihLzn-q7nCyz0*YE+Rjj+^ZY+h3bhdjl*1IBwNS5BBcYsWIt4}-$(;aEvUKvuTb`) zUN7WxNn5t%cyvN0v}_uL;|*A{;xXz9*?!0p$BHAoUM$a*4(HE<`5+lM#7bRv$Rj^@ zA!VIxJf6wkZf2K-=Z1z-@bqCMsg z7V-kr%70nIKQ6e;G_w|XKkiqqvf>-t0V0l8Q5!{e9&Ji~bYjB`g}$IMuGg#ck~Tn` zjDkFpTk|J!%v6{@xbxT9CD}nJo4O@={N1Hi69${d)W%WN=E%;OSHaYBhmU}oK~3Hr zp?=fY)Epdl^~G(Xj|L)QFeg#mly$e)M3?z{r=SE3`fef`9;n<{06_Se2VFTye*0CRK!8o&kZo2SkYSigfqV*gE2B=gH z*HY%aNUaexur;wsb9Ar^t?Bf81kw|!d*$`h$Q7liq~Gvcrr|}FR1Ai25L;C~1VX0o zJ?DSyYi)%o+@!s`ioFN~Yb-}v03{Mx$2<&F%n0PGG=y*JlvEn!CXQ3tu?G(4os&s# z?wkmSh>S+Eq#zKGL409W&nhY^nmyUoZ$|1q(w&QeLHpmcFKIY%k#ww`rQu6~&}e$~ zN)yTabkGZe0B$G|j0mdRUX_njLp>pY)LA`1vyg9RXh6+t&7G3y5i-@sbxgbRaWGR} zF+YQh3ndk0Ax>gKSYAlE2v(6~g4gRzI_G*2LoXH`8N%uEVQs?toeZZMZ;&AwCp{>*1Ce&$NS7z&{H@Fcc$t(C!K4#%3k!=3BB?npU+Ugh5M&7b z^`Rh-5@uo&0du%4A!5quZF5=$HvhKf%*s7M{o49O3S~@dokIBpCOMdbQs*hdrIuKAA zz#X6)=ZU!Nbf*8X(aWLPt?3qP7(Ei{!lc{6sMVg5m_B4}9k4nrZzY7hwzkH;R^K&P zHf3W7&A10B$l!?of95 zvZi(r{3+DEOfHPHli^Ax1)?mz_dT(>vD!{QTzL+;w@fxQHa^|5t3jU1ql^ z*o@pc{3U|G`T`q91}t!}+mRBmdTRUydky9vEx2A1KU(aLo*&rI7lb2~!=%Gk0h zRo9&UAq|BkCBOLN(6V&LHeRa5XXi>N(syVAp6XZ>Ffo*!)Sg#Rc2>#YXVtz|FJ`ss z%*x?k5JK|+n^|e&I5Pg^01>C_G+r@(7T}d_nc){fiIi&fhf?J1TFj3u^^Qs#$|9HS z++K2xT0FE-E|!pNULVIW!wL83O!du}bG-WC@d>!)o~ZzA@46AUbS ze2W23a=9*kEHkQX=%~8@ysr==nq+BXNq6_t<3Qqoc3&_ATTNgj%}zaCK@H$vkH`Kk z{%_;9TEx4gkB5KkzuLk=BW2z$u;{(oK?ect7iUm&WHVp3kjkwhR5m^9w&R@u?o#Br zsKyY|Fs)xkXpjzXZ{<=A6X2o)_I{60H?(}70SQSOXhn)wF`=~cv~wBx)?DUR3roG8L^1vcd;4r!sprO z$?542q2XeTL38>q7l3k*>x=k4N(EvcF#S}c{^R$9jS~dAhuM7|gcMV3iCE3HNClXeGF!MW6%`%b2eDkrGh;Af zCBL*Zgc>H3f2mJx1+GlMF^RIucct&n;=Wi-GiuQrHZ1-OTWt)gE!YfsSGZ>mdA}Q- zF*^Cf**_u=&9!351r*NnWn{YoLK+yi`xx|XrR0V@kn<(weFA*2vu_thhhX|5p)_hl zL0a)@{URof+pMfEz zvpq@Nhz`_n}! zi!ELX@YsGZz(sQSWDLcXBKkAT-SxH(Bf6g$r1(Sd)Y~!~c>5iDL0X@No(^_o0D;PH zf8eE=zE})L)A}>Pai|aZC0Yj*{y>)R)C*mghWM^tmcX^56WgcD!NwPB5T-S?F#um2 zU=B_mDb#@Vcf{I}n!uYBB+d;u^SvU-B@LAlHX`5B;^Qsu? zZ^(7z{7xnqg-{C+d!C%d(3^^X{rsXhL`=Qwk+Zc?EKO$Fz_|xF?|?6j1&$3?8HPlb zo2u?+!MgT?A6NCLXm(*$wh)e01q*nN&>Cm8_%gxSh#-(&0qIcBlSYRc4w>o^d216yCWJz*Q z8GYa**vMBCk-CVwWW&`44kh~ovvHXFquVO#OzZp0y&JT8DGzpQZ^@Zu&I#EFJcP#@ z@;AYgiYQvJWQL7Ukk4n1I1ll(O{iid{P}ES@(O)b&oOW8eyI z$T$_#<Uj!5dcn~n@S!C5P1Jnw! 
zt}gb4R2cWvg={F9yRp=d_?|EZcVpD~RKNq|=GG`0gC~!^%W)bC!dFU{KL9ftI>6Sc86=B;Cy&V@!`gbCM)f*|Ahq5lU&2685})|hKLVg`Tm&Z zH*{{f3;YGOuOlo_Jm4MF&=~-|7{#%@Uv@b)>o1n;J_TEMhXg-D8fttI@>fC-JN`#& zv~BHmO?vKSS-$^wLkWxxzfL7}$hLi!{~At6xyzSc*~HT1@-GWgF!*{Pep=?<-y2)p zoO_5maQ4wRumoaE1~54l)fJ|6>XoXH*^2DXL~U53E*Xau&cU{__M;N+f^=sy zB}WKpOS&lF@Lrf5{^WyJUt83fbdhuRSwK*?C*_(1d#Yw7m(uFja9Amo+iw!jo%U!& zml@0xew4r_BD*8x}EY8=<>Vn8M{6&N~)uziO}>*^bd25P?Uls+8XP}C_8jh z!u6aGAnn>RZDCh;aReD^}K_-(@<2z!0n?WV8eLhN4&$el=Y8zBoz5n@M&8M{Q1o$NJVl|fu^&!LG z3v35o5UQ^4;%*a8(CXq*74Ag)J$9>UXU?U2tXX-(>v#iMg^~BGHE|k8yEeDWVYVHr z-fViF*X}}Ba}-uLxA=yJYn$wXWt(~FDwOOvOLPw}xBeF|;mynkq8vE>qO!P?S ze&m-g))LwrOi;Unoses(OGtCz<0oY0VcG=mLzbSmUMgBz;j8IQ=o{aDXQUli6ra4P z5YnVKYQvM-Pv7Nr-=CHbmDh$%Sewo#z-5nJii&q^ae?(43p?(4Epc@-uM zfC7m$7qingK{w)yB@P0FMGeGIThHSAiTyI|Ib`M3R<~s6jiy?zBU)-`Wo8fW-2M#b z6A+US1;`K)WO&Zk{&5A`h!H-B=9oAH({isM*|_qH65 zyM!8yElpZ}JOf!>)cRKT_O^w_TY74(S%3GI)t(SYQv{2{#nZ@CgCF!(2@CvDl4Sg^>Z$mJ6m?4pQ{=q* zL2TIES$9_Skfow}Jd+y|yY2yXptvKq>4=o-M7ZAv`mN4=1iHl`-nWC8u0pi$BG-9t zr1WQtr*_JBUD8+o*0u&OfBJ?M9Lj)s_?ET5{_RA|EZBlsVi?w1=gFNKHL32ptZh!^ z#*$LIU%F-~)u^rWTJI<#lFL|lWxKkPg~^Y}^fpMJq=-!CEk;I0B2ghl+?kK%%0X^( zO;UN~FbXb1lXL&FaflFBw(w(8deX`Ln62mxeL96LY1pKC{~-cl#!?@P7%86r%!!-Z zsn`gg7X~y}K~yEmRoAs+nSI3mi!F7Ov>%$n{#^KkW;@-Ag@rQ2A8l(bFC1A3phJ2T zi1_4~Uq3JW{cKR!7^nwe9vUR3rAR^al19|NF5kk9mD{VVVlULsMOjsj$0!1$u{G0;4LN+BmZ>@y>z7EerF5gNyZ;jc_EGHWjW zif7^G0*!^C_gAnry?ugd0U_iWW@P(OF$OLrk%QDjo^)}W3XVd@%6p^}zL%GiL!x@oVpXnDvT4Z)qU&-)DX}(MRCvI|f|IhE>a}{z zGx(vck?VuRemJ#L%Xh`}Wd^fo{mJ|t7t?G0<9bt5^*Y(7N5pZdp}&&|d5D4B@s}~x zr>oLGTtaC;q;y-`dSgY4ks~_&{ms+|`bvhZZx5lb%Ew5Q4&(#_^AMH1$X*>{TJhGjDE`)*+d&5Gxj0x=?p9?WulruLWn$^Sn= zlTiRQ4Z1-}2?6z^oXN*cW}v)yWYc)b48y+#-jLyhvb-G!BP-d*cWd9wSLk_NEXO@{ zbo+ZT1A}FgW^B-*;B03qyQyi;Wf36#ieNgHT{=xA1o%;7dvjb>Gy~o9d-aj+qs!G= z$O+g+RtF-hFVCXsQG_-o&E0R;j~HFowsi9a7NV#vkZf2eqBhboy;HLid>jzgg)gCG z^1LK{2Y_~VBpPv;{lM6%kp2hjyF~C#X>S|zg9idBCRRR}7@qCaiR@^#(Elh#kxXM> z$L8ZV?dD24Gr7TdO%RAkUNJU4Y{tzZl=TL`p8z)qasI*dU~o0L;N zgd1l2Pt*9z&A`h|&*Tce8l&TH6_t>X)J|eGk2^i{6mpGOPkBavaB`LMKPY?asH(m% zdi2tQbV;{>NP~2PbW2G$NOw0<(hbs}f+8i|EhQx_-QC@9U;TW4FaCQD9OI&J?>T4h zy<*O})>_XR4sxu(QvRxjN)VD9vx!x$jaE4m_2UOS0D8`@1r=y<7JM)D(gw{TBH$qc zx#3Ep^bWo8Nz;gMjEnTr74_}T@q7!dyFQB3=I65=XFehSL7P`Ge!i*0Cxq{iugvci zI=|_jrYT6Y+mQ#{OT|)Av0x|(U7HrJ%b#F|1TIuDV@W+16IxYd99A$|3Wt>kFX#N! 
zv8xKwl|LW7oXtL!26MxInR7%*n&d2&efk6sdY+cndZz$18%+1SGJ!t^Z|;(U2nn<0 z%FVF*4c->;*gW9Z`mvl=?+ZS9b63m+`XI4j}%X>7YG77a0YQ6{^xP)d^?*W+qA%y%Jn{bP7| zk$c)2GP~?&Y`!e?1ndK%{PsFFLL)L5m6EJ>iWJ*?A=R&HV79Kfo*t<@ZK6RLTe^Jj zz`&{7uxjyy7U=fC+aqRF4~oim2sxlLwk5_{0MY2`>guOZps%GB9!<>c^_cz28XwHQ z?t%_vMuX7)?bA_Y;KD}=^}!Wdh80>3Hu26EXIm&Cu(k7U#Oxd#gd$Ws8jg-^F@(|K z@T}3x6gyane~YHZS*)u6+)kUayUL!Nxp}!zQN;qe1mH|8;bF!ks1V;5AsL4TSb8{C zt97=x3xYDWJ2L*j@SC6D8?L5%6i`@2h>FtqWByp+w`2Jl*dRHsaojMRK6iT``>d0+7l>YOz{LW zLIN|gyADj8@6v>@IH4~6{qyL7^jcN{G@J1?hg zxFFDhqz-3WD%1OJT8t@vv7JzczM0zm3A9wOhy_d&beev!+uf(&_D#K`a)CJqjq^Xh6#b|OMlDo8X$ zamL1yi@=9g@W?PB%rY%LN;cGZ=*^Y49ZIk$uHgq>C8KS1nUNR`b^=`d%yV_i+ihz5 zpu*zq(Q4>2QiHe3dItLY`CM0xp{EIj1igvGRE+@-3?-(>;31~+bkXvY zDsCoiSo|)z`oep}RwuFq&EI2}Sl-itX$8L}<`*4TZqa)al=sD;VvXjD9x~WNldmSZ zQ}!t@u4C2fu3Jy?9?mles+U~l{S48_?t4;;1Qw=?FI5(g7`~J;(Ruhk6)!pMUy}5(p#5 zKU;utR9oRSC{$y}1KK9|c2u4C(JZ@%7e49pukonf(FBW1bmAupa2COfwtXuRHkw#{ z>*0LsSEt(%XDj#mSt~lrV0tL{XiAw^u<`83%xRdhkG@y30;h^>?qn6E6)h@|J41m8 zbshhXO>h&Z%t9E{$ynjhAG&^xk9s(@hiKI-iQ)RV(_7Zm=BF}64n0rX6=|%%+()W~ zMR%7Id25AFRX7+X&2YJUo;@ibF322P*d5=Fn#4AWGubaMB2#}`At4wHgG>#Mb29!@ zMRr^XKj_n(u<$O9%Or@dLS~RBEbB-)Y;19ObazBx*198uE^)p8bkuzqlQNSoLgCoD z-ez-$fkU4Oxk$AG-k19e!6wbCNcXoojW{@mzDP*LJ8{h`CNvulII9OfIJFP&Uiz_3 zhe)yiJ*Pe|v@T9JkiUyu6#sQM<sX`fRoa!nXsPHH{$QT_X8SH&8rGsL;g$v_E@z< zCiy}Rsnd}FmW}kc{sO1>&-IEK8*#qm22Rf}wdy_Aa!uCRU?iJTzkt&pBdEDv?G`y3sv2N0YtO@PE2XCzscBtga>e_RlKUE(Tc-*d|7m z81E-(d6f2~MB7DweS9@w@kfV9-*qtOW-_Ot#%5w_`mD#Zv!R6-OqzN$!__5xgJ^R! z<8V4S+;jA6|Jil*L2C=w3OT}Uxz}|WD=#lPUJvncTmLZD3mod(gb2ZwmckQMh{3%J zu2Sng+Jc9`Q;s({qn5UvJHzP4eUHAQwr>yKHBx;to}SI}ILR?s9V&LwYQ7_?Yp8!q zxWh;B1Xf~S<8shR4VH?fsbFA|Kc=&LQy|ubB(=ws{rXNYO+uB|^vy<|?V?oq%xe$# z#(>vT5xFv^^G~0Bv09qnhqZCTf{1XLWiK)f0jHPm(3K*3nFk@I&#^9m1#tEx>$Z8pEMvZJt^2$_|L!!{4I zLbz-8FH23T`H%KUpBXK@#$i<@417<3pmBGD?c;q^w>WF_&Z>L0Y`^ORKMQ)s{nC$D zxy??`PEJo%baV!Sc%ybd3ZL7S?`YRe)?W1MU45~<4L}znwo@F}$%~0Sv7QI--}894 z!TxI`CX`(@^eJuEs!}BT@nx*+omaE)`nA9C4VHiNBm1w93GnG^L06Ut*}4vtHILUD zLiVZBG-AGyq*Df*Fx+NeM93&Z*j4e9Tt#S{7U|{2pNaV$`_3;gUb>KtT5>AW<3d>1 z)(x=BLTpY1AOZD~@0+4wQ7(TRgamgqi!4aOYlAr;7Ea}|8wP$>EtbKY~0fI>q<=W{fF-1-*X=< z&E605MO4V&o%l#*EEu5CK@dgwg6#W%ia`D5FLoJ4>=gmMaM3}IEea*4kw$kM}W z;B+Xe#@h@LHSFM3Cq&$6bH|RRvCgLYXmc$O(_vCz+-p0+UvQuC>=I+C)Dd8a6suCc zpex)xB^8$lUfw4aUOmp>C|{XMbX0_iV8p!Y-9dN<4dQUVP2q2Szz5g3I{ouX_9s^5-PX0u>&G$?J0ow#mT0M5h z$+Lf2E9%Yb<$@IyNZb9CVz>YI$yGyjQf(XCD+f(pq3LaY$inZ;cUm<&FMXOH>22JQ zVssRjTPOV@5r*-un)OW7smp7dhzhlbISU_$UwH|*y+JfrSWctJ3y8>mn0V#CGb=I< zmXOP*{PwzP4{RQ z1d1Kz?uPcBud;m$2_o(5+RBDc3I&86dR*Fs$4}FVRRLFtWodDdbwV`!ZWWVlq_Jg9LSMo2vZY;1KY1o``dES zA2`httf0;Vg)0y3^CBGhEl#RWRuF~!#>i*q(Vvl7)8^jfrl$Ke8pBv!(>N{^_cdaM!jE(}D|I-4*CgfKyv`Z)n?*LC)(&`$09_`iChacgv zi>E#3^?LXXxiDBHvO1x`vXg1`nZ2(9aBWPf3LW6@j?-IBv-;XuyggSDPAV9&Q{+0U z0ZJb2+X-W_0^TvSL1=728r7u|5uUb^Z8!hc`hDe}Cl+3aO&WJC#Cni8ljO8LG;g|LE4$Iqmj0Y72cA)WI!G(0Ga-uS(@50N(x zNza9A`r7fOK)&kNTxEL0%9$X$qjmi({sL^u7}@uGXN#pNR&_-34wIyTBZen#`>=B* z>J74{F7gjW`7r9g(l^DU>Ecb^2_%kuwH0U_NLKpFcgo66v=Sc?*A*Scqk*F)LrMl3 zUgG*77;1X*mV2;ilxR;}YyHGjDfe8BU=+AjWMlT*gPWes?EP5UuG5+k`Lz1Z%tC~D zVSO}CAu~ENGfBUdD-Dq+rB@=HXf-=KErM*a*6=yVb5r$_;rA_5UZYg};D4a1B1WOC z8W*v&4U#6C|7vsVO(vtD0BQ%e<7(karL#Z9(!p$4eGM(HCo0{*ad@Q(S7_yfEi7`` zM~>kd1Tp*cM7G|$i99w2nG)mm`$IloBU^-D)mb^0GO3tbD@M z(eRQqBHzP9Jk;31{^C!54sJ0V5#*7$F2Dh=BukZ=Qv}T+Tt=aP+c(M&JC6=WjG@tw zbQ7n9r;RDj$4&#b8tLo5o$C<=4(Xha82vRR~2PZHUC)&(P4!N#Z zM=JorB)`afVqa1OFM_?TxI%ACnb4@S>ww5emEYb&qZBu~?V?p}g_k|8+-`_@u4<1X z2yKZu5Mr)2`G-&d%+Y@@ ze~p1P3els9$qK!YVL^X1`7=zc^Bfu@>^~R{Wg3ab~8+p?D4 
zR#{@X3!owEj6U_)7WNCfyYoQ|7E7Sf-_1rO4ygRO^rpt!ne4b~4Q8(7Wds!ZLqUZ; zK^xX~aS7aj$ZobbFpyHv8lXhM*9#+AHl(hO2Vw*52!$(6;=$Vsh#R1F<3XPiXQz#> zBvuBE6?m7d+!5BAsq<;#qT@Us59ERhb8{iD!agn~J$Ho2LD!u#_gSP#Vf(vcKU+zT>$tUJ?5W}Sul^NI$6PbRRB5+w7CNU-n`F274TMpn6mebFqjvQ05T2O*-7#+$5}z4UcD5q$ zDKNlLVj<|}eqrI>w>nFw%|a%l%5TH$E1-CQcnU<1!R>9i0A5Cb zG+JY+^cn|+fewn&*mLtG?wT97c55F4+@Fr}q3>g*5JAm+Fp`puk(1d@AzxRrb z(W~KM#=yFU%A_&$yk3-IMG&A;34`4sK^Y1C(01yCe#)3BSO~o6*56A^*Q*Kdg?b+x zb%rQ&`fvzD3bOaTOiEN#GG)qZT0sI$ZGXwIUQCY;`jvyM9r<)P?0DG;wT=4c5-1w;a z^}TjU=#0(A(m(4NBbCxPCVKoz{;m+EwAm zWJK&z8Hza~QnaCW?pW?u6}dzGzH)_SEL6Dj%!ysrfzp@fQlO)A&cfoDStJt_num4p z?ZJe91~mN8fktfkR%;l z)H&9v9UeW}PbYCKv!woDyIh;JgWs8^C5m?#a(#}R2!VjoI&srg{BDw%|EL`O91sWGgQQ9v>KtF?cu%{jwL(D;x+;X@7z#lh0pMg1$)XOQauFJ;Y%~CBKuw zGwIu@r^0>L;P;zwQN4bkhMpm>hvowh!)OKxBQx{Cxy3t9B6{2dW~03yKtIRXHsyvY z8oua3TbCg)H4|{CAb*A`#g9?nG%wXfs_j17w)n*R3;Kd*Z(jhaxGUF^cO^gaicEP} za7+n%%l9=;QyzxeI*0GqXRaYReR4|m+q;l{tLnz5j7SEw`PsP7o)wH(b_svHLL`R3xAw9zHk`#3$*I50+;N2=mpAX zkDra}C(sU`)oge=*CujzcJ^MZP?F8f_Z7=Gi;M{B|0`WYmZoF-Tj5`GX;=Ze3z(A<{vt>`^0LY@tmKpDPLkuvK1y zkjld67;TQ8^d9f~*?_1<7YhCZ!UQoGrW6qeUaEMpEHaIKnEy#W-DqdV@R6zYv!h9} zu;CC-dpka4A#VIas~8c>bN_zEcdA66aM+u>X7RM<#KHt29)n58fc*9m||iZCyshI!dsAmF7` zw3wTl+y9}AJUu<32)d?o^ zY4AO8RqHVHBvBy~t&iaHN(yYz#}A~_MlQ#pJW=B_0ks!b3-EH)Z!x8whq}(hKyOnh zmM&s@`-|jq-rP#Z%NulI+%pc(^n?W1wyd>nAiw~pyFVgKiU2Tr=mL$nHktAoBx0QH?4p5}!?((p{7}H(ya?o2}4&4!&*vwOk5c z7;-ixHk%n4NkPJ>pTkRS(gRV?)*=Wq0d+?eo3XrVL~dL~OGpAAv9gh)(IRrHQqY_; z4haKt_CvPR8pnaeY+#8)_H-;Cad~N1gsqvPE~{kJQ^izU6eg(*_#_>8AI)`dGQx0j z6cDVSlkh+?2164PuuaX)Z|=3DC0BiI*wcL;07H}CVw6CK$}&O?p#Ie>Kc1)%OcFN| zYeb=yE0kqwcA_5tRx};D@u?m02t+aNe*ze8ZM24-|d%;>uBFlfB6;6p%Pj1 zL#%D#J-DFLxTpLW73xYJx-TWf^0OViSg#+F_mh%vySlscN=r+p0DarD4w!=Qa7)O6 z3PI3O^K}x9e9$RKK}cqi@;--(BA-W|6v0dPCFMF6((=BItXJ{`!H1xQQjx*_ zXSIRv_iI-CtSXwP+-6?s7|H}5LSR1vE4+~p8(p&aw``cNUWfqouCqY{VpBF&N=X3* zLXb7c@4atBtDw<;3UQd!6a~S&p|zo**w@n6v7ypr9E=TS=Q}_gfcExK8KT)5Q5T- zs3Oao6#Ejs$(5cQeu8EwcJeZTs?h9qIimR>H5-)qcCjq2 zvI^atp+T^Fg@3f~mW#_A`n(T~x=Z!0K9@4n*H&V+8D*5rp3(wJv@^=Xl#1%hVhnJo zj&XANkjOMYey2m7KgI2q5n`Wh+y!ER2HqnT)IiDz$s%;U9B zScDtJ4pE&s@E}w9swStiMY~FUHNQ!``R^Y7h`%@nlN|bshbCg_T8H9$CT0< z-{Cl&o*v71^8U#j%Dp3l1^^)D0BRF)pqv;w62mndNM-5qbUp~-d=dRui{RJa+Z!O3 z2MN%B?s>QWqZ?{Zf}NDeNTU}AGb76ETS<2-D=S!EO;}vJ_r$JN_7bj!5Sg3J(rRWCla)j+gu*BE(>{>YW>NPsv9(}QqVMDZNt@cvl?5s z5{yx9ISbm5Ua@8d&ozX}kmVamI#hK`7zVW)9yPuY#5#{IZU=n=P(a!N!pl+{cXyDx zYyeS$U#c&#&yn37H)4SKR*{xgW;j;%;uC5<*YNQ<9jx>2$L*4oY>1@`3Sd#76}Z|i zOl$VL&$WXJ1v>X(ztj_5H1C3q*t2aF0sHI!E3=sQG(DKBU+8&on~~jTEqV^}0kH3I zt$yR+9N+EDTs@RF9!*wH83$_YDqN8WyILvNhWuM5B36cE6su|?h_aT})|53Lddn|z z3ZQXUi+D{X-EA!06)0B?S_-qIiWg6~)$8`pW9ovYDbT?$-qmP7#u*h06mdm>?_-^s zT@KIbf8iNnmSG7oGdG9MNC8nOYr4FUOy&XD*-#AzDq;!<4N+boSy@+9d`G>;!z-_< zxnYb<=tDfK`~s8qRfGE7^$Bw-M<0&qca7r96-i5bGm&uE-ZxX9sDH9S4+3WZ$U8%T z;sq80ZQq-bF}rFFL@q$@Qya`usSCv8tnVysyIThapr3%$KrpZybmcL}=E-jNk@KB$ zZRCIVtqjyS+uPfq#wbL;?DA#bSVc9!Uen!wLM^W0GB&)AEtkogw}e9cPVjDpV-}uC z@}b0Z3LROAbks~je$qAZ|Gkj|7>NyimqBxJ8JP(8$&v$Ta%IR0&1)n?EiP^kSGAtO zy>Pdw<}eOuv^WQ72M8e~IF|%gPp&f7R2Y(#izfg8|8G-w#L_9P7?A$_u$#7o9IC;u z_tz7Fe1VFZ8V=N@gD*L*?mYTmK7#+oSpUio*yd#aX_P z3v$5^;Y|Y2F*{i)98jbn9AIcUj&v!TUjXa?LknKowL%j%`6AUT@W#W`&I|mQTV2d|GaMmg3Lc~NXpu8f{8NF+zN5PS2Y*$#nhy+_*+70`m@8Noy`@()WYxIquLANjH|Ve!3@LYEcb=&s=fW%#h-F4 z&0P{oEWoe+mop)Xs;J}H-LByzadTeXe=J~PsEYM0cEIf-IIl}$}&zvZ-AeCpksz6Nr7&(I2RvY^%KmxfTW z3)r0lQ((kNl!pZ`j}xIaLT|XEt#d4)f<+Pp06Eo?pG^@s@%ZGijw&k3{iYz>FR!j$ zwUm2`uEjX*wuLDq>;%s&+BU}TzLM~Xrt2lH_>JH+RM_M=soAb^1`FuUUk1{^?1I<& zH?fP9!ooR!g=x)#u^?s@bG+%OEqq(U7c4%Td@>Y^olwKT 
zUr=K>BW&(;xM%#zqpN>CJWiSdClZU4FrtqE5C7Ab72v448;I@No}{BP?eBXWwgpl? z0v8WeG>+yTRV{$>`OT*G^*RAuSw31&LcPz`yMAIfl{~OscRr~Dm$EgJ?EccOo)JNz zhO6mB<^_97Q>>@|9Ra!Tn;5sSM1E@s36BG0Sn!ghwDrLY$}5Lqkee%aytmNJqO%z4 zO5RMPSh7itCJqCE$H=B&D0ZTN=uhK{T!5zmtplcZ1_4lKG0@YC3%%Vk z7MWJ?=9DjzvjT^^t}w7Bnzh+7>$t;j}pINPuc{a@~p6Gf3XnmI>$?M)m8TGV8#>iQ$S|Utbq3wBj%? zumTC$Xzs|i3s7XjL7S^&JL?Ur?PRA2Fizpx+$1a=KXHl7PO7NzFxysNXtf`b7u9?7 z^1-{4Oy19d$?o=4!>6VerDMB7s@MO`7x{LB#~V!_pao46_7{Q}ULMQ?B&86*NzqdV z{-kt@nTEMy%5vWP5Rr*6Hdo150vO1`yGghgh;8s89fcFtQvX}fW0x6#73e8)>@U}Q ztXGZUMtR8|0qq~~syY5#8G{yCB6MDSUXK`4c~0lPTGu1%XeXN0&FXn=bv7lA4+d0(s?I z6nS3QWBDPWFZ9m)uS)~=%KUGz0h;X0mu{v@k}iP_rVFm6tE;Qch6{A19o}55FzJa) zOWUeEFr@Wbk?cR7-FtUszHU2x+D{54_R4GdkM|n8M;2;{E;oIDX3b)-G#$a#*e}K` zK0aJZD=2h>(elSz1+gm|8TLPcr5hP((_Xi6i1w8aQTRjR42+nMokF1V zFXRV24Ty_i9o)P5rf|1G?JcBGrEqB^ht0!_J&nl=lKV?H7O(g`?QTgMDwETm&8qX; z@Hkk5NlHiv>LK$+1v}IO43?wBkF2nr1oB)n9{L*0hXDo ztEH8-c;f&<*g(ie4Hrl;Oyta-r}!QtIC0ramJEhIsUUDyNM~PL4F2DR4Ssy}l$hHN zeu*r=6Euj>D=i$ggz7Oki3mA~R0HLYJm4E%E3LlZf2b}dImCMiI`nhIKoy`>j3~6v z2!x(gg^u3bC|^)E{!RFiWXQf1?JK+U)sfNK=BCN*#XgVc2~C$VoB43|AP97g*PYh% zT=D#FXDl30VBJWB=88*o9uI%Z#gmWOM*4K9aa z)Y*T*;!4W^StkRI3d_N@fe1sF8;>_t@x2=MToymr;rVI|oFO7_-@SX+UYO(e0Vsd> zW@|AZnB?TKw}-9dr<=n)X>2B1c|mh!wDN7<+;ahiZU^tb*3Xnpb@i=lv03BW@y_3H zj>JHCGz?;kf?$Du)?Isu24j?!8X#ERnT1D}bGV4X^liA#*&V-&6LLvQOj+so`rZBu zQvRP&VWs^+c<4!*U>Y)6Y@qa6n$^eSlh$DxW#Q>}0al+#Fb1Jya>fT46O;N*r0EZM zFQE-28t2jDrQb)gv=BVSXKYK>)JOiBR6Cz%Iyqf&MaBK>wYoYz7!UaHN1Md{o#^TI z_NY{^DS@Cd{7`IUC!W=-QvO2$dL&1U5^9LsTSgH-p?{$#E(u7;D;!rPf$;!Hr+fMu zNL~MvpTmm=NYfZES&pE-4(cf=mWUPh6EbdJ!QPV(@hcWt}7C-~N@4NJDt-X|)+4F3l z6PY5lhv!!8s@O+6)S3l%S+B2VS6+f-Ab|g%4i41Sgox#}+1Ad7g7MuD`{r}}jlql_ z_~Cb;@r)=aom9-?3%OkMCx7~kxvLXHII5#VjE4jrk-iNCG*Bsc=zE|wcI#naWIR(W zSx|wpN5@}xIz`5>QAyCO2*Ks?>U(WJx0$U$1McZQ*T=30UdjIFmm6Tb=ib4%JzQ@> zA5-1MAL~42dV)A+`E+&!|NC77Blh%iw|#A!g(kvA_rsqD^Nm>GoMqUz5OOd2a3JNQ zWBw~iuNWLM%n2_*v@f#rTc9{w>7#A(~2n9i=Y zp8BDBmVY1sDr%>jJUJkSLz%o+H@juWz%{IZFcdljy~1HxOxJ7svncn*pV1V2o8s?X zmvoA``7LDCB_-uGwc?D}qA!w-&%&`rVu(Tpenxjz5Ws#P&^C6&2$A%q8>NT?Q@p_`B#N z0pgeqdW8#iJuI%Sy=6sh&fVP}Jk)SUlQ9zkncXE4Uy_sczDKR`7$bRJh=4gDplG1)xHXpprLcSy{*k(42#fsByqR=DYGs2`;ml@hs0qqw0G$Zmds-MCkrhtBAEBt zJ2(g|J3@`kOb{48zPg@j@N>7UHRk(wMuEq>Q*mcoiYFdhV@efcraOAgjy!R-78o&{ zua-X-mlB;2fRvH)>-`A#LdH8HO&TpLbHUhyAo@Q z9Qe0Wci{%>%sy|Bqf}*~)gHD;wO%*^*(ZB~IXruo{B?LI)7_YTYp5FBX5JLT)F8^# zYW8{sdPvCAz9}T0i}B~*E#|pmN1I`r&5?-hGZh0FSZZ18%}#NAS{i5!T(gw7O#l2j zLag8gK?<|p2k2phCXt|TYbayiY5eGx!cp&Y?794jm7O(H`47>r^Hq`S)lgsp2AA88 zQ}79Pc2(5WLxMh_R5j{erM6?@j+H|1vCnafIOpjp^Ay`-KzQFN0b@R7;+1{=<_LFq zCRa{-e%!K^5VEFi{iVIq%*BgUbbgWzati)Kc0?OYU_K4ccP1bKz!)%USM^$W_Oz$! 
zfdhxR`(o6DJ>9Tcn;(tlVUT+<7jz$blVCxCBQRgxbzF6{{f_tChNrfJ&=Lgxl74v*#4=gW%IHPIAnA}7J;UTO^Lg{5Ig7-V~ALN@Kg(6jnmAXxE60#32E2rZtp!vN z4^v7zOyBF4CAU)%{=2q->nyznKMJe#iF)IHp61fGD_YdX-Y~W;-XxZ z_s>ULtSrb3k?EMAN&V9$Wj&}EdSVMECnrzW*k&x2nFP^Q*#evIzUa$cTDO7%IDqBt z!knKVjQ5#*`@>YGtY3nM&v6AFlzREB(54PJPU*QX0|(P3{BVo`ej4F*5E8_giW&xT zB4iV*N_*-Vn+N3TxCvdyiA&#@E(sY=_cBz5uEaSVTuWBbj%G|hrlaD9;DQ1gP`pK1 zrRGlNGUFYBiwvl`;8(o>5CBjW^w||p?2IW4(B+Nll4=NV0GS7-Hu3!8AV-BnW6412 zCXCFSG_~)_i-f*m)FPIM_a2|T)1}GafKx^|XSAr@iiqI7j3pP3U&4xjh(i95D)xTD z&M~`iE4yH4#8Kf+au?`^sFrCk&u2O`Me$u-y* zSXg>M9#U@7gU*w6aFU#z9fR-`-wXU+t@JwbmqixSui*V87a2Y z4+Q92$R}`MN4^;#jBbB9v#4c{E3$Cd(a8_|X5F}mpy(dEr8brDTWgA5V&LMu{P9Y( z@HBkWANU-duV{h52m<{ILqK z4UcuyrXo?M;#Ig5tr*Q45t3rUNtxW32r^O3xXd|MxwdBSOK@^Ayq;!HW5pNQS9I#m z2A%%ABAfm7YOruBVo9_YgP6+(0)gh!u>vPgz7-cAPn*OpgZ@v`Rqt@fe2zpRZN8WD zVN?3vXds6oa+Ar1MkPTM6buX~ryGNvch{$E?|y&ET|BTJ=Ev|>%e-KPt%ZsB)=%dW3CII&LrivZ--5;mS!AVR{!x2kMfzHJ>x+4a-;9eGe z_}jn|OumIVS!yiDcBZ{=uikU_|HBo0xA$vR;oi4*rH8=c?1O{_{tI^|E|M%$gc#SD10s^NZla z@hUK5Nq}c3$}j3N8jWr-$uFEW8OEgXfKX6BhcQU{cwEtftu(ii;V^v{eYai#f|@ZqiYhtt)SxU7Vgsa=#VOyNv^7 zKl4=%E7n$FYg9+~W-~#L`{) z&_&@~_Xm0Fmxg>+OVUg zKty(_uJhn4z+sBBBM#e_W_@6h9lRPqYGecTC3|v1!CEw8Kq`OK0m_gd?z$yU`_ncy zHuClJM8;B>L@NZL%UW7`v90(qI~yGWa>h@?UJzv+9sa$TRqLrzw7&fmxzAc)P>;t+ zB+HSs5#=NqPe5i8uDqOdfcctqR9qhgR)j_7bgun4?Mj#1r)YB$eSzRT>G}e2%CgUbX%lwfQ8^GeIl9)JsYmBqL^s|9{ zu^tHmP-R4>-Is9JSDF|*emEp04igzmRz*RTE#G?Z`F7w%@R&pr>4RvolqF#K6s97Q z48J6K*US!b8?8MsXG_F=MlR=sHB%Q$EeRYVL)FGjhCnT!Ng^fLXTsl~=0(UV8|7A9 zxJ~8^(}caT5B@mAAL88yNg3Q@$F|&j9zNMMkLl5K9@=al!~hXg*JOzf$j#$ZHV(LU z2*V0XONScuy}cV7=NffUwZqdcm&1yzZ9~q_K(*(UqmTPtok7_IIe)0!f;Wa@-tAip z4B@kp0Gmxsh+A?hT;wwo(26}P)@&|WqI1t&k_-)K&wKl1fQlAY9uU~fjsMe2mD(Wf z{FS`^4`yjpIvLkQ5a@%-QHpCwVs?Hw)4Gl$i+s}#V15g)?qaCD3SU`?Bg&g@(Ng;v znVtjYOCl_)`C?b>h$@_v9Gan1EzL2FFOAGIRGG_C2VwKa(+I|2io}t%?sri+vE%LA zPNIcmA^c^N%gcN3NIYhgpg120J%T|C`lXO(`VOgCV;nDeNzf0yBnw8p?~z zXM^a|fyA>si{>w@nxr{PkXnA5E;Vl&E-2>JXU2QV?^Af*pr_C*)IcchmifpJs-uUX= z6UZOn3;!&zSlN)d;gJ^>p@|KoG5Q_%+^1tg(3x&EyFQUH+)r9owqw@5HTs#p2h6b0 zEu$YuERE;t93haxG33Irh|Vw!llA^&NB|*+Icg)pVCKvt8}a4dOlQX=zyRS1m4p}Q zxYVGWNDas#n{+KD&HgJNr$_*e_`qGjIVdSKS&mZK*FSvrc$&G<4CJt=)0?247u*AC z&#|vw&9n+#UE_-^nH|==G0kiZVZOn1d|@-%HeEQ}!?4=Vu_J$;gl!|achE7w{QmSV zjz)p>gjr8QU0ofjlm#P3p$-0k3p`{`9SD(QuJ5{aH#Ma>Xui}(WRloxywsPF{T%l} zA}+j}@)lV6zvaA3vheg!CP51HBbSwx^+}14iV4JVP`aI0oiEcM6SKx4`7#gMo?-C# zB<63wfMu_u+Txra)_o1vnRwRMFf#wU@DnUO8w!01566fbv<5WHO%z)3wX$c#ug;Iw z44Mk!gV;`BlMj@(Kht%9flj?nN?PWY5G$(bh=_?ojl2ROG?aG`r2{$#sM2D_hMt-E zLa~sUFgfaCY!}!ULj%ht5PAq{W5=dF?NT@^)DeRge4eY)WXJ)t-c8Q@ zqrmMbQ1S0I&BR%mvuYXHfDVK_x~f4uqHVA8^ORa*$OMVZE7Ot5P_+M2b3p}oVsdDx zh;-vtuaabkZp3lvfnG{JtRy@!W_^zc*UikEbqLC$0rC&ZTuxY>3$c8d5Gi81Oplq( zIW?GViS^Ovux5x$>^soPTl&Q#B;t3xZ5rDd<7Psc`3g}&zVY*7iC4#RT1nIKgkST* zP2ca_TSaK4t*h$-zK{4mKyhmqclojur}r<6Je!*VUfJ~caLePg_QaH((0nK}qKDj{ z1T-H!FRe7~!v=Zt63FN`MeQJLfC8Lj{*_TYda;VsJNp#O%RmMBs3;{0Ca9a9E`P?8 zoHf7CIUfcae029up|KOttv;A3gO{?7<-P<*w~BOng;ecr7G%!t zmCnI?Ds|5CEk5_%Z1>v6M#p+0`EvhOBCIjH?I{8U0rN!R8+PY$hm03i;Q4wScR%yZ ze#yw#^>BY%0nn|kpA8m|bG7#5ZifreY$iVqZ_c*GTjp^du7?9!9+$(&h?1Zl8c;7- z2+x7U0_c#(LF+ak>0?Vjpoz7V2y!O{Z4DAUY-s3^vJMt9>Ml$Y>PS~(4i7YX>d`x%Wn_Nm4H$sezz>U6Oce;+BD_daHr?G9?_*7ZxO) zEt#L{69ckg0Cf4g&hcQfDh!>J$Zhd{IgDJ=*qCba``cCDA&&W`&gCPaO&Qpb8`Mv! 
zF{F{t2??+IZGMIo>b${t{tw}!F9M1-5AsIsZ}ZP8R0^7hkqd`>->|H(3UrF?^>W1O z6{c znMA=52}?y|+UPGq6|aXgcyccg8@rZ?!*iW=DLBA#2zAi_e@RM8qImgoplQgE)o^Ph zZ(}MuJA0$lHs*Uq>h}9L4(t7Ti_iW&#$HE)^Nm2)*AL4h2ES};y<g(&rNz*X%hTMSM!x0!IuG)Cn&*9P0-5?jkA|%{C%Bla+LIzQG zZAO@y2WV$}Bvu#plNO_JloV|r4p|AVBKsDjt>w+-Bj3RYZH8I*Q@(tb;V|zlNZqvy z$8*o|3D$4c@+IEd%BtvVnQMF6LySshd_w{ji6zxsA$*YWsi{@)Q9x1B1voo4{|7Lq zY|)qbSwRr&n1^Lq>jNL?i~sBYV(cxTvP`3>(T|Xj4(SHL00pGG5h+_zQbD>~K)O=_ zNeKy+mJVqUqy#DHl1>T9dtPVeul3)%u4~qE9GUNX<9W_=&OUqZGq){NYTc8J<}J!w zGh~aZ`qkTP^v4d+sN7LExL=*HBxi26;9JoW;xUa(B|J*AD&MHcLYAyla$|Pitk>Am z!+dFCOtgRybSg))%0FMbSMkf@nanPK#X!;43(7FCNilqz`(^R)Swm;H0;=v$MorS>Z{NlNJPE(<>89O-T<6sxgh@w#XA6pJATI_U?~I!A$>hVG{P;L> z@1dV_B*O*N?>Y&tTCPWELfU#npLsiO{nI^}Cr(sLFZkl}-H?vqbitnZfnYp+Ktn2@tkoJk(^%LbGl^RuAYtsJERb2z`Zp+b)nRo=T%1qlGd!w853o&U2rNBkE8@q_{lrrj=Y}4djpj(hw8}Im>m2|1qr8$5 z?%6M|Gi($wDe0^ok7@p+2=L~DZU~%tLj}5_ux-SS*Gd5zPnPvmV@WB6PcIA%=Aot+83_-H04fnso=iv5U!?J#S@R>b;FE_jin|QH5~kqe#%dF zWcRGJB>zhkUSz_y&Fc5?S7w@ErDW&zpoB^N znHg8f`FuEAbHJwRIl70+*VIGPcj{D;b@}u0D9P)pUsGr&imk-@h{E^`EizF49zP^gDrtGL@L?hbQ-0a&}#KP{{!Myx1kc&YC zJbsoROA(_|un0KneT+xMJjs4``i7_9@jtotuAm?SEnQ%_>&;^IkVbFfk1}R^=Gz|B zpD3t4JiPk|?G1(@6~1UD*O*E4^b0q7o}sw(83%SY*+t&Fmao@qTBBRk)zqWEcA{iXs)fbDlFsCcHQ;@nqb~_5g|T&(ybXSGDRw3f7p9?pBb&l$x0?vAfmJPUtek>YVFD#vW>!pEv51_id?BqNU^=ooVDTBkwKVV2~Hs+BD%i`I^3(S-C7_Vm3DXUDjldpeDUOu`xq{ORs+1fhFvA zIaOt>l^BlTP+9q^F|}`N$-4e%8U)K*KoIZSo(TjpWtrUhWp7s z3t!f&?;O-pe1w(T$%UPJ!^E7M%r7^} zs$TQY6CT;Nmty76bGh;hMNhPzgpj%l?`uc2&J#-Q#t~ zxU2g4#$r>oLd4_p)A{aB+kxTmk;EZ1E907WX+1+T8CH1II1LcQGSw^xZfx>?4Y&39 z9;3Rzq5h1fJtw0R=MnYY`^@jovfHBaFMB1L(fv|nf8l!VKw_tS%J~-jhh<6ruAhxiW;-B<&Tx$#+v8nHyIA*<*4y^oCF7xu2%x zFD{AqBJkZ3@sULI8$Xs@jl%yOlyDk-2fse;qrWS*ZhqJ|BcH0n- z-tM`jqkKlsOr?A|6sp2(5GDKQw6W8bhPgrm)g^_;WsRt>Wc-{TwMHMrexg0dAKH!@ z;=xeiRz4j3V~GTyi}*R>XZplP&nhM7?9#a~@TJCgFfEVO_8C1;;H_xX523!1moRJQK7gm26l z5jC#k8BtNhSgj$*IW|A^o>q+P`MVj1=cUEYat6V~2Q)oQB${&u8na#13TjH^D+}d23uWT}|f_zNq_^#2zod_X`>Enu%Xi&yG_F1x+Zw z)45sr2mD-f=FO*tJo)B#*m=r6zleO+gr)yIU`@%+j<*Tz_M1Cc&C(7|;~9#KFJDWS zB2nC+%5fR%YgARA-qnEOa(tf_N;vARbgOxU}%PrDahU8}0dNLD==(OToy?Sxks z4x9pYdbZhv9J}MhNT2K9@jziI9O!eey=IClvY0X)Ee~7`a+|tC#s5UL{l5Mx*{pC< z2gB$8%eky}bfIB#_t&e1E;!rzEyN+=E!dZuvvPZP?+Oc8|Iw2D36!0@PL77gi{+bK z@b_>nH)VAA(A-@;#!t^wzbrLoAgxe@N4lM?>TsY0*|&m%Axvq57Fk2r%`f&SuG-x>5;WO74=W zcYol;W)dmpt9RZ8oL2>Y;QDvW)WRzAd-HP^~GC8`L zlVk)SjSe{uP8avS#&X;;9jX=J#YCXT`CKtWb}3ywg621~k87-oFB3)2ArfCma_1FT zpW7yL<$z-tpkr=(EB-*KGF%zVgKvT;IBysx!Vjr-NAEBzCnr8HwMi6H$tB2?K9o8x ztxTq#}c28YgRUyd^Vwr z4ea<~rC$&yZSTMZgB1vYv+qqj8nw8j_38XjujA`i1MZH0J`l4QlYwUr&AIon$xjwo zHbFXQpfLxCE4tZ!^*aGW@q>55X2O>WuP!%(p2k?0tL*U$i-mz9k&K@U*za=W&_R&f zx~H-;^0u~C?2|?@I%+za8v|YgmVbu=5SL6uNEFy)$K5e8WWYj|@M!^5yIV_wXaqUs z6(T7jBopF*bs#NTn8n$~jIj~Zi*`aGB^c0T(292ZBA%?Nn_KAWsBS32Y)TGrH|3OL z=2^7ThL<`TSY0iGV28&Co4=yaobT~?_g+AYr@ENa`VUAae5v;=c6yPlQ;K*rxD8WX zRu;q80NbP5=~+g5Fg$eRm8rGg@}vRxNo+&*>aIj~7vw6uM_#$NN65dHX_SkMHT;KE zl^nqo$BiTBJlc)R5bU@2{b^W|P;@W@$A4dSS(;dgov2^eFds8g=Z0cofv)h~A_DII zx#WK!a!4UR7C*;JT^`6Wnk+Inq$evEfREpuAzzWZ(WbhBeGp_EM%#Dk)vH(dIMG}; zOGv&_(e+<=dG!15n-VBwtx&&P8yWeO(SJ&P{pX56Qax9bZ?7k|(c}Yss@_q;bXMF{ z$JHT6vx|-`g;MXvO!@6vhL}%IEE^A;GCxS!^}InYTWm+mzQcbo5%= zBH;=7<^1Y4TSNO#3}MBaEUsYiuvhgbK7GK^)2YbEo*)0lgf0Dg#*Z&vUNt`tV$|4N zt@zPwKW>^|Wj4C*oGBl9o6YCpwN@aR3DIV}fn(bQYF%I-Y>gk?!pDjZ2PkWLVvUsi z@js=nVO9OyAdNT!%`oleOV5&4w%#v8$t}Ne>$XqT-&1r+{<+3@{V{#ujt+m5Ol|Su z7gBDMJB))*2Qs?eJwmDIHfE~_wNyK|l^;u>`uLVeqIWKMj*4B{eLCsONEnw<@HB8v zJA0O>@Yi&XUZ*}cRkRQj@wrZNn2-PG0#h3fN9RgGI%XQPZH$Jv$Q|-7)FKK-sm*IrK=+vLR0Q*Vb*V 
z&#gQ8H9a#ouM^I$Q*KEAIU7Bkwq`A!PYWzsZE$sQ5AHTPpxR9>xxMvyPXe_?Tq8Z8 z-%#rHGf?wDajt1n(RQIv=iAxlCXcHH=A8qvkX=a$pRY!&+?xyHVlKw8qwLGd%79%$ zi_qD&=h{Wb&!+Mk6TD8g6;;wC=%-#D(9h1!BAmgG&g%B^e?2?o6Wq%9t7WHw1jX)>Xl82KfUrEE zo3%DkLz%dWK^H2=5_lq3zE=H6TRZg9^~VV18%%j!7T=n1yYE{BhQF!%SB|xG!)-D8 zjOv#8^)ud^XokhK$oRzqasT}5o95-~{SPi3c5`!vb2Ags-Xa<+N@WYAmWAz5)ki}w65efkdHK-qsP&BqgXREcV3HZNh1rhu zI@@Qo(-6{&_&w2kc?kyX$~idjJ#}B_ms3=J~#`6|<$zH{4ITe3|zAr|Izz zm{Qq-K0O`&Y_@b`aWdDORIf6SP+z}8xPX*^2du*Q-|uSQzN z6CaMJM(v7OG17G0ffwdeNwhOdAxFg+I);H@(hjk)HuI7LVl^AJEw-RklW` zV#S=YOG-$A9qzkfK^V3|gOzoOAg4mRmIAYJw(OoR0c7e}A!9&56L_6=L~?Y!6<*t9RX&X(+zfW^qbr(z`FcgnL1Inrh;%=ttQOe5_SlD zZ#SLwa7`p`shkh&c~`O3Yt+)U3P` zF~xS!@Fi|sh{)?CvOnRs1I1?%jL!Yujjdy?ou1IINX5P<1E*Q+^~-LU(o!BCVnE_42Id2umyrsNC`T}zGt}~H9Cu% zfN_&-;;HT~;YOct(KLriQAcs7ljyVZ?XmAq>6utEXm#@CiS+Ln;?oM?L*v*NW|L0V zxg)Xsso6sR@xB3yJ5^GDqN{*wF1QzV|?F)a|}Gxyc8W#b2D5UDMn(7nYsD)>A@6q*Xb)JuC-EZyMd}Jsq99{bDuyi+l?CtDCydw zBGp{%nD4~+ss($)AQ+!KnqVcd6@v#t2b1(Ho^9bw-!B>`ClG6h&lF(AEpqHiIopR0;lb1 zEtn7^K6K)Ii`^x8iy@GiDe&bXen>!8m(Ehx8}7k4>8Qh|Zc(a?cR=m$7<25lX828M z4`T)cpinM|egGf?;Xxl-_Udh;(f!y`JQkAZOB&{33{^UWo8G9wk$#gdXmU~NdAx#< z3<{nqGnQDTV759FB1L%4Ooj0;EU!% zoR8zwgs}CkpIt6;GzZcAKdNKMDU*tx4|BN}d7iqV8V2(oYy$~cAJ#pB-Mv?i-vai2 zVud@L)*v6_iW|k#=5QfolnASs@}!0MK*pig63FNowE5+nLGtuBndjjQHlmHJ1ab(# ze%rlXXJkO-F_td|&}B_UU!MuI(lZRv_=0(i7&6AP@SJc~1TR{xB_cC7!2iPdmMWQe zL;*;HK<_yz=Hl+2>AX3KWNauDbg)Z=OwgpPtO_Px1^{h+%mE*!WDKEDi#;i!NFD{Y zk4xX*usHlU}D$VmK&-JYaCl)GHI)FA-Dk8bB`cZv-J<1ynb%6?Viv_TgYW1;_{|5?z zDqmOYi>LUcqz!6{Cfuo5GCe*0@Ng~?98ii}wheP1UIWrPlDq~&{`&To$zSAL%Fr;Q zi@vl{=z(@Rs5vUCCZHP(rYJuvZsJv6vc2fYLTLnj320FKEKg}GGHBM>`FSBjRNh$t z``BwMfVrL#3NQ>AwMS4&!pi=733TPG;C%oxdmvAN#3{}-rBNS==-q|l;^GL`Dn7LT z+uK{kK-czwhFzt{A6{5A{WZ?k(EbHm8t6Gu03~+uZ976>rr^8|ReMJ()yl_rT;rkn z{!)-gTlWN8+}5t7JidtL^;VRT3g8bYs$1*@sxpAJG6%zLEv;DJ+##W*4FyD~NfNlpX?jnrZQCC-YfW~!kaWS%8x^Kna zYv03U+Br~+pJ~?C>1B&d0Uh7sSrT-Ng;cLZ6a3ex2!KZ9-@zps1;IgMgZbKmz(U|| z96$pqj?0WcB^A!~Ra_N(y@_z+g5#WqfM{!&hTMAb6pUYCI7w>MB z{QA7}u%Y)tcAC6FFd%Z>7E|@~H@}?lV)O?`$xEStH3@i5vpGMVa(B;w#n1@@3!bu|hMRuhm-l)b2 zvW*!f5zh)RZA{Bso83iv zz_ox}mHDKT4k=Rq0>Dbp`{SmW1+>;X#tyAZkShA+>um?DQN>paFckNy;ANMVmu)+> ziUL62LQ7~%VJ=bp7e(lK+3bGWsF(9ip-#b`l~_Xh=I8vcn#%rOkXg$o%=%| z8`{A4mwCmi7%>eaV{r%<<9E2KQXUIgG}<5kE0;`81s;qGz8jT~GcoFckA5qb{vWpo z-@VuLQZK>-cNkZ}Wsuq;^l5(=_;xy7bP-xVez)10IMO_t^femod&L7mLK^X^XmKxs znF&E;o=h*&%4?C7A3^aX>;anlw~>B`mP*w?T95R_v1`+IO%X@)38!oWcIHVngQ zDg>k+=%^dI)tC@84*%GSHU0!JGCGuJWsrL)j#W}GvZ#prX;hF!$KCfnD;D(C)y|4i zQmDUim;R5)EJGa5z1v<(Aw)?gQ`fFi!n=KOTI8wFqKFstE8&B4H?UiJ@38KtnR(<2 zp5ClEp0afOki$)we?EYmbOFzJn@FK5_OAPt^nGee^ z`GbY%k)xbVywte6-;2b(P>ytQXPmx46ly<>6gu2Ydwep?f|uP#n%c{0wAW5_Q1f{- zCS7LV&6)?WXH$KdFD_e`j+VOgWJf?`GA6djh1B8vuw4>SFk{V8{2Rn>DBHw0h;x!a zW{l*GPdumsE?y23Az0z}$&*)l_2Op{(}V9hk#tem7-$k1~an19II^SxWF_Toe1tawZ&~s?fNK3PFtT0YWGx0|OjY^}j%?Q+7cC zA@~mf==RuhZ^}A%_BTmHTX&>=&Db3yUHgL-gVXBA+E_YXSD3J?sW(3 z5y!5yV>jH|?bsR)Oi5v&E;V(l_hbf;HbYkytwHD6cFDT{PxmvI<02{pF1sr+#|iOSVRqc7P7%Q3a#p+*zkqYQ(wqU z_rVj{91--~h=y);!h@&Hx&os{gL&yUg)dreGqZE0%!_1vho^e0S)6cCzZ~nnMtyLwwJ<#zQ_2MOdWDygqaVokoX;+K7kxcGY^D;uS zt5+Z7VBP7oKKXR5=B#va(bir>VT-^wX^_b3Yz98EbUeCQ8`4aaJ~X@#Ra9qZCjw)m z0m#`Bujd&I!ITuW1v)eXP)8gB9`SZQT{J|A!NgSkJ303TFz?Js_3ppl!;GA!#|#ovRievd#0(}H zug2WPyJ|P)ZjVY3v}S;Jd_%5j{H>9T>}lv2Tq5i#GXswy~m zf1Iz9J~^WLal3A<+;Q~~bJp1518d*4((XbGbo8j#uQA`Mv`q`;0$B|u<><)o`DcOh z;p4~XA_O!Mpu4Q57sph7e;=G$-kJ2K;zwW%mvwa926r}4!cIdH4V})uyoiR}f>%P% zYn8={#!uv>;amVClyARS?8ZxU0T6@iEfZ+%sCVLZ^lf9xH{x4rjjob72UoSKD-RQA zJ|q_ppgmcst3HY1=CH#o_M6D>Sa4V$y97_0qW1{~HX$ZTr`qAG 
zp(rXhH&?%JDZ((OJ$UJk6`KpVg^-ewkkXsfzX&;6GjvAZ{V+=xubX%n z4SqqHNb?n5swWO15s@Mgl`-bPXrx%!z?ctLruwnh~eGK^ZE->Y&boKgcQ=zjr zfA-!<`zBO6umGeP92^`J64I0^?rz1#Q%18JcHhB-Pc`hbpzrH@x)`!AAMKj=znT3x z1~iH9ZHB^Rg2pD$XSRLTMKS2H8?L*ie*Zoxh@uJK-w#H^P&t?mpWD)@_ZY2Bf!WzV z9{DydM2xO~ZZg12{n^eINj8ySzN?DM0E`!i?Ty33@vm%U#NGG(($doYVClNMiDCw; z2i92O8scJ3am!s$9&K8s0GbV7@=;sz^o(qgy(9<}g-P2Z85M19i}Q|$^43$_s3<&| z`vmgFS2=;aJiw2QGui4}J>4MU}0}HiDy$Xx47Z*8yaDjkyn;m+BxgMzQtC0hT;(r`9{ENznd< zS1>a&G}Oqa{VRWYh=o|r%FRU?z5Z?LY@q-0%x#-QCnv8FKda$!{fZfB{(3!om}Gr< z9S&i~4*E% zFMBugz#1RB%-Tr1(>`eFliL}Ihf#S3ED*|7kif`-%XN9c2UXr48}nLNgmk876p!`N{oJXN z{G48cf8Uc}O23P$$3#slH1^k1;py8`V%yQf?r4Ex7*a{1m;T=iw0j)0EmphmzU51g zW{va1`A7sf2Y4wR;Z-?_4k?>-@{>g8Loh?3o_d`+W@Tk1Jx+|UU^{&PHmEOS^%aOd_LSnl zvc_srNQ`K1vUSmU;_x{yDXzGD{DoEze@`DPMd&EapnPzv;jTq;3blPmP{$=ekO)u- z(=VETrFjwNKMuuaaQW-k0!a1D+?)|fGopZslsLKqOIG6i#2%{~s86=~3Xb8fC=CgR z>hiW5+TjbYKB+rjB|?#XC@9#L9(l_UKr5*@(5q0u!OgvO^i^#j4=o`$$byT51AFw& z<+X*6-vC9R0kejUyFMo1zzK^i6VRBp7k!Mb^U(qoH2@r-p#T^*lCA)RjV>X*zVpO> z;M)YUM+nVBy!BAi6Ps0|-E)U2oK6C_?%?{ZL& zPo)1ayQix=v3QD@i=YHFiUi8%ZRZ1f%mu3#ZRjoxFD@+n*tW}j)O`273^a29tf3Oi z(5|#aq+CA%_cUK&k~y zd0Ii(xlYOx5(F~}PPUqfEUkuxPQ|cw%XjLo$?EHYa1!wrSn7HD>WVQ6Q)yLGHD~#n zR7H5cvQ;-YgWP@k5Tj0L(8?s18Td-baMI)cks~&My#S#EzruX0Bskfp+o<>pW-Gp& za;c?Bgu}oBBFJE@hj#>6)-i)eQokRS*m)&gBZve@K2&J_(oMi2wlW_*C2|2y4wH&$<%x z)A(MHA{v~`{-EE#9oX2}c}-KM@t9<;ffJFbt@uBLAOK1n8^9kyq;?(f-K7|wo!`G( z&QGV#X8`~6uc)YStx8Iw1$&XcOhqDm8a^B%u`O@m7Yak*35vi-FoT(c{L&?Gc&=}X zW>vH~J8=bJi7X^)5EXH8aT9o-U4)&ZDwC-92%X)S?RSu#D2_8(YD;@F1(;60+7 zQaJ;foQ3)M{!lC5&ueK5r!CfRM27*9p=g;%;)vAYz>Odj@;EGVjr)Xft$?{56c*;E z!Y=#rWt}T~es_}KT-x~w2Vju__|#1eWio}svYGuhEYt{kh|rCIgLeNvwqVUjS{sim z0xE+ggvO^`=z;vxlA2jv4ON#sC5H!YU&W?A_sKA2#_U$qD5=R`4^Oy?n%Bw6VN?6d zzCxm-Y5xZ{jGA9u++iwj>gvLSi5ECTL?+xK=`;5u+@i z>jSZy0w7kG{ZSJht8pQUPilw%I~VYOzGm;j!kegfpFVx+UPn;31g(!z!SHTDM-m11 zf-Fm@ZCgPi;RpEdEuo>XTd)HF8;?#*ya+_5N~KdOViwvvFl6ZNmwPt5CS>>iUGrvo zI>~u+8t%ctFtnQx)mLGyhn0T`9177;9&la2^ee83OTop#kpcw1WVHvR&;oIBaRC`? 
zq=Wy1pTo{#HvIns4Myq+G!%3#nS6&r7{uLBsF08lD*XTXc5^YX2x$CZ!6URa1cwFk zm)3+6q*1q(lnnZ`()$x)_CdV$mw)(|EhZ=D1dPY@PbH*osEIY>H1Pk7XlaqN82gM1 z0aq485I|`V6Bq9X0Jf#AZT9SVv%TRJhAZmlG{Ek81D4+ZEzJ^T=*|TGhx8OBox-(2 zZ@IPsL(VA>+L2XMP|H@A3M1?q2rbm`IKQWKH=Z$;z;JrsEoHf2P>4OmFT`16DP(E%_0J@e}0L)D|;*q;~l`4=I*sOpnT*4 z3c~hUX)mIa$ETUN_dhoK1eTznAm)V&%uvTcHyRO;Zh9SrI1x+#{7>8rY;Cq%5VgQHmPQd)$ zY2{#C$7-#Pu9$(CM7ToOei4O2(b3TW9PwheJu`$5fe4F{6#%URWMX*1e#QG8kB=HeKw#94 zWDrN85R*eJPS*Xk0l0RJn4bP`z1k~z zE{2$aN8iRoja1olKo`XC`}g}0Y7E=Ls1LiXJux8lfhUK6l*p5S^yDH~8lqufO@qIX zLi`Qgtp*Gt6gR&=Bfl9r;>3f>-rxdAwgpNuo2_YSd}>}l>~M`?tvqwo*e7eS)k<(> zryJro`wI8E)h9~?C@%rEW8mz=%XQk(L$`7`pX7#S;uuK!hT{cEbvD04eQ=gsulFWqaLdao> zc+zE#0BY=A*gTHTkdRAe(6_b$M} z$2V>?)~#8nW~T_%&bI~wGP^(3SP9u~4FB>*8*7!ILkvRv(V2Bq#_GvsH zK70UAP9hW)w?1mn%&}DB1M*Z;-Bzvwxed`(fm|#>$TmaXIkwFbKmq7n5K~eHB9^FA zI=vg_d^BQ5OPN$64p)FZ4W%B!)k_v}{2+V0^6OV1xM}DEaHQtS3CK%Jogx$HFrifD zO#WnEp)+h%Q7`PSi-7`0D^)5lA+W{#ciH1_>M7j4dpEG+9GFg9^Rem~>S+?7vR(S| zk&?md#1@qF#j?m_u?9)Ky7(UX(ZME>v$HeQl!jxU|B67Y&A$LK1x$t^eeeNpUvqOa z25E51%qrU7z9LEVOUNTOf{;7|&VdRIfoGlM9;bW56kRuTD!qZ1sKr@RbwLIiVWgy_ zoN)wnLVbqbJtHF;p?ha%XSsQK%Ufp=P@94eru)IhLL=)?nZU}}`jFrd|L|oTbg;t@ zthjmYnv9A{SSv5QF=$LWEuF5v`2_>ke#7Z9kd~Qw{3aox16H@3oB#|9by7&b%LEx` zZ@%OlU)nOPypEopHgG2_20>J)BLf4&;pwIq5fnoxDOFVhV_wZt?6&Nc)m5N+eSYqx z$Vrjp6?f--Hv_wfIV{b?&GR#BB#(rWfiqd{3VibB^Z0i~GR@Uap^n`&d*P1V{9R6W z?-JipR)+MiKYe|5WhL>ACV(SHM@NKpU+Q)|fZzZ8%T(g=kEf&0#ck^B1)wL*1!s~# z{M)y|*o+>;rO&&~%B{e?76FC9tm_+5hFRIzY(Q0Y_1LU9XrqyM1;>s>^%NAkF3=v{ zhTv}SHRiI7y-~B^-xI)m8FnJLefG_;MXInj1CxZaE&s*U#_x&youv@p3!c`EagM%N zJv9_ZS~%vj^q8Zsq2mo6Jhq#42j!J#@R6-hO9Gby8r_q1qMoOFPeLuhD8h2P8deTH zgWfNva2T(&v9U2yW=@$S$p)i?`Aafv=h_UQNV{_Vy1DHs>=?${zH8eCrSHXERqM<=*R|pef ztbKU&(Pm>@7!hbgt=R$uMCjm#uobtUzIX>aZqX8o>z1>C&*rk+> z4L6(vAtbB{4olW$Pg3V*XSWdE1+04@00lx%1#UyuWTEc<@mU6((+ zpYbo;3q)4+9(?%cn(i#6Sne0!*>+cP!J`y%mc?OUQg>sy(TltBDMT_%%x(e?$->zc z&`tmjb_weWx7q9S_afv#B1zCbWH5O{f2PSlnfVSZHZ)E-6LIyx{T zqT}M)eB8*SiO0B%R8^CONTBRkyQ^uDM^|=Gp%6hA4E0xeb^ZID@k!6J@|zJ)iB_EJ zT32y|=~GLCJE)6lC`MJFe+(eTK;APj!R4k2r{XmZa@B+p+4FW)t-K9DG^ToIRaVeyAFtGOyFguJ))q&CuWLqqD znwh9t(o6nfVPS~+)5rwsHiTSZig(a-S8t&&Z6T{hwDWB5X3Oy*FvQSGppO0+h~4?5 zP(elt1*@J3yfTw__T)!vHd)f$oxvlK@KTNQTx!LnjGPXg)B4COq{^M!-Puw$y>g>& zpN2FugWF|Ge-QpP;$X!e9IS(~5~v1BE-P<^%DlpZzIaxp5I%z#)$rMgNivlX@q0)L zUi2{ylt`Z9eqm$J*#Gm32R;D#kup5W+4vSYo#TNtl`c}!yMM;*g&1f<&AtBMk%_CB z{sU$Z`1(rXJW*m3a-2rh0s%CNS(fPmdyo-rRQrx3QGiwF99g@z3lagma zH|r14+d?U$h0H13z*4N<3W?Z|D{1=z=EsUMe(0jipoP!GT8sWRA_a>Y>ZeEw2iQ`&& zpl7XB{!(z(z=t6NL=rgp{XqkfVm=AblOx7Vb~`j! z!pBNZ6{9}=K47a_*q-#LpFcD0SQ!j!gkNj6mj7pbUngY-PXVo(m48}@((NS*K-Yk{73-%oSNw3uVvU=%| zT!x6QVYESt{9xHg%V~|OdVP}M{TIp3;{!nmA<|ntDOd3E<>aU8b5Y)F7MPY(5x3IQ zbHhSpo;@TlvZVFdbGx!&_fSFZ9WM*|VXqglDfhIBG26A~rn}S>Sf8}~J4Q?Iu~W!? 
zT;L9c=PoBl4=jn%@w}O4<~O(RP8oNOKFCy*U0Du33c%Ve835!B;>KsU)9`0=?Sk4) z*SPZkI1!2Ud5nI3(H(bs)WJ&8_1R5aO>zeZCFT55(>IA$P2zXdi_<;qZ&)CTNZdJJ zn{an|y_a%+K0Ggm!4`#!8-pJb6_zMfbfsfDJ@kszWk3WqUGseHYF(-0A4BC< zKk_NY&M5Lj_b|(@jg;g!<}ZKvDAO!0jgA|INt}S$VSrBEF`xYBl*EK>o-bDAdCb>c zTV0)oms4rSTlJf7kL#y)g-amaMX2URb;)S z>mlQ%AJ4{$mj@oDy!o+U?*`QGNA`^-2Zv$QBCgV-b5EX*T8 zFMPD%-^HbqOZMYON}8naD>6J33UwI7Mih|R_^xenvEZgTmTWv`B->S%3}qR1je%;e zfuEm;9+Qeu?WdIu?eANq@$2R69SW}-jOmxp4DjXMT9ySPSrlpu&-xSm2EPov6UC6< zy$!D3ys7?Uvo;fQQe3QQj2N+oD@H*PvCPb>1F_+}Z&ffcsXBYLWfJeVw|AUqn(Rmk z>HZv7Y;YFv0b?rDPotb1=_od(cD59ums&=G)`(eWO*h9^j4)mjqu5J=$E#Xz1;h1G zvYb}?!;Z(4H}AV$yxD~F#XRX4=dcIMds>wJ#m8pmgKtKIZU;h+N^5k3;SOI=_9M)|w zBd~$mgLvrn4*o6nSf$4wA4R(h@i*Kp)yk)Nz@rc_wrEf+$-Xh z+%B`R*;r!efN0#0ix(HgjSpzJ3Z#A;yj->?8k+aJOH{HkI$qf&OXOHG#w*2WX=&l$ zz{NJvC^4XLilOVeG~MVsd497};DK;xLCjYwWgD!#-&?rWFg_X;CI&d5$i1fp8cMft zvHNbuh@psO0KnR1cL}rQsb1Z`Y4&*NGA0%ZC6%I-3NHGZ<5t~#R5&<&L<+;`3U4Fq zf0#|4CO$5F%2hL=CMAC0NmnN0njp5HTtvQc0G;VVfI}HmQkZh$8 zpqh#JUA^zMO^jtl+;V!860LKC5)BvH--LPj`KmFW53_If9jH;Q2pPqnhRt_WC+jn< z6}!QkBotj?uM0hNdEtj`a( z=VG#cD|1{m>3@_=?AQIz^XKL(xvZ+L`pEF<)5u*Vcoj}Hx?fHfZeCX6c`{khf%^dY zD7c@AKk_cYs{Qd#X$LKbhlkz0&??lDzI~M%(fNsj3=1X8A^lx0^B$#UzIacLdMe2` zeY(w4XLIxGXwF%3QcF`A^qhbD zPTAY>ePm>0*|ReGKQ4eP27SEP4kLQcaj|#(qv@CRK|G|GD3rp=Eh@Q8j$B1QWtEzVY8g5Mp#>7NF^#W04Ukk${V_B|ivQG4v@m$rtTD{AI3<|D%rC-9 zEoXQ4SEx-@IU0&8+!bAh+ec3%bCx`sPdCP4We^M;LtIIT0=6l9c;r|(p5I>i#4pcy zJ2Rdmeaux=g(%kic=s6Rv?ly1*3%iWZ9}D$ zA8Wnm{?wW6z=Eyq<`fIY!Jh~%Vxt#v2=We-$l&u1(e@!ABw9 z;3tX}wdzE_NakWL*)ObX!K^7x3bV7rPU1^B9ZH%pSx{5OKBV`DrtSRtdFSvj}mz7{`{VQ+75((eOAy=b!R!-qHQn#eV?)#69RY8CH>s(rb4-^9LUSMQMI45V3$ zmOHeL!QH>IwZ;{RBNRHF1yH~J*?PIb^!;^e>f|VSL;Jm$jPK9gz5K;pw%)=2p}?g0 z)wil*xnm?ar20zeyThyE;=0*cS0$X^nFobFF&?u}N=bRWF;*7J6oGytUYXJJ(BKz8 z@uiPct4&2U<*Xbk+xy#2Cx^D4+*Fr=lU`fqO>nO`B+>4XhAsrGFa8uWlB%vvjvj8wl5*jmz)umViGgaN z7M{xeSWmAI5G$9Y7xM3?gjf`cXgkX2?OWrkPxPh2Q@0lXob8M&8rE->8BFyKN)?#H zPN9tvGiV~FJRijv`xTrD+Mw+n>gQV8+EDJ_*z6j`)ONyS%JeA5UqbVU}kEB#6v+N?tifl4M zG;B$dkuAH7kWfjov$8_U-r4g#Z_o4lz5n-pkGG@eIC|>qzVGk-`FyT%UgvdQ9EUQh z=(ZsDYk?Okt_YYc2td!#CBU<6R+Se>r23KrGV~JrK64-L-=?QhHQxHm!n6f^E!&@; zwu0Ykib7LP1Y4OlZ1m7 z#qe0vzYlY8+++9CCms#WMMaz^V~?$WOHLNo$C{*#br1%(bHK!A`j>p9n4>aSQ^NB> z3>mPz#7_WpeAk-CaY(&EV69<5B9TNYoje=NQ!VSOFn(9+SUab^%l-EDb{pWl*ojgo zIX(y423(Pej;$j`hZ>Dl4;@hxv1a#@Z(X=)3r z#FyN$V@Cq2X(lTRKbHL_ZEPO(2)nMlwSP7U22+!bhzp`y9KgtSD?$eUP^98j=oQg?rI!?37eJ@ZrH!{j-M1SnXcpi5_T{Ea) zI_Te6;g>uI)18`JDIlu%KxxN?Vdj@#R5p=+^ht`l)BXOe!H#gDj)9looExn^^DbWw z+aAXqkldh2ec9?!<&|XrN`YqOCI8OZ9l;WrY7ZEW2)+N~Y#Ml?QKa@e&iANq6VCT3 zHu)&NojA1n20GK>2H~tUwA4~M6%|e4FRNtKfkk`27@8ka^t3`M279bDRJ5#$^oBR3!ElCO@mfUq36I9Ll?#rb16{B?2M7 zzXCC@g-?Ct)c%m;wae>&BJ6T=a!fjZjpVMzxjc7SJ-H)7Q0Z2fi+o*iPD+PUOL5`V zw!F;BC;WQ5HTq4uQX`zZ`tNtVJ>U{F$0imVVNs;l$E}z7>G7_?ypFJux2sL=`DD0H zjOcSy8iO&O$HgC8_H6&QG|JbMM5}Gkw`}(Dhu!P%NkRGhcy3hE@9ORovkz$$^|jF> zzathL%bPvRTGCiiJhgYYZ>y@=CM&Vk^1M~U<7^q*Y+hu$xz=p%wf^S1^cH*1XXe@P zNZqaOwbM&(l2_x@0$fTeZlD9__ni|%MTeumJ=YV98BL=OWf4id-TCJMw{Jjeu$%S) z`LF9$K28e_H)p=xc8y(ZH5}yqoK4nCB1gy6%*u^B(JwBEA{s@9in)uF`y(w_v}%x7yjJ zB%8)6X=*I#{JZX#LAz=Ot@W^F`x`|JN+xq947y$K=&ff@80d-As-@;~INoQ`~-=#H10tdDlL2%j9Dt{&A5TWxgK zTGoknoaQRD=(R1b?Y?7K!p!48R}@vy7IyfcxlF(xlf#N==vZIg{-7@?Uh8&oZmT1u z-Kg?VP`b2mlvKG8eMO_2nWS6k$?%JDO9DoMdz!z0Peu$nvF$XU1P}4iTkauZyX@YJ zS#&bhJP&&5qu!aWwM(bNd3#LUX}Y5&7xD@sTdPCc!frM9b+dtUGy3SUoE;YLy zX{|Y_yI;Prx4ms?Q7LSwZP!|l3i;>XCf}6U3R{cV0RA@Axz4cEyydPds@Bi%ZL}Y* zbSYa`WZ5FmT9;chKTeWNJ?mW|e)QS=;M(<(ktymO_A14l3bP1feW`V#gkJ`@%#Ib= zU1l3OWtnj`fpSStNpD$M%Suzrey)V}%|YeX+3aIlv@MmrA6NT^n+AVmj*ys@Vy^Ff 
z(6`mt%u#||Dwy(~QLbiaWaQDXs^%L@C&H2{pN4)nEeM{OacwovzA`iKlJ@cHos?07 z*kzr%T>>hG*)#80on<^iCqu?|*fKMP%A>?0(l7%G}Av)`dsj^mwPv z)W<0;zw_JM@Ak54ohRYh@8ndvL7}CP=g)^-Ea~R#hrxyRa56u3B}>Y#C~~5UbXR@T zVzKD6?_2(`cIla&wq@Hr-fFA2{J~ayCDXL-{j-aSF>W{GZXedjXc{`%{p3%2J`rhp zc8uBijFh=7cXU7Hzidvevl<x*zIy$o^LUdU^JX1;=+)o$L-nC+%pwH>(I$&32!DozO_0n`$33Kp*E2aXr7dz*3 z4)tDhpm9A=sS@@FH3^zZi5I7=J}gtO(#Y=$_%Pv79Q?>nuKdao&Ft%E@)w(6122z{ zUJXN4abVT0_|J>#hg*NF^Lz?uPFh||TUf|Gokv&p9f=-^?^|S$O-}aZLx)GKB;riT zN)KqfwZ7fDK5;<#^Gu!Ft^%jI9ZMZ^u^e4K`evK(161}%a`gXx?CaYUw?6eE?{C+A zVX5-bf3q~vxxSM-Tr#gP!QZ`J+d9)l8TE})%RVJLTb@U;Wzc7RZSPUt=~ef!vwTP| zkm16F;&$_?(Z^cn*SnA9=D*dJ@1JrS4Vo~XIvO32W?iKqBtF&W)?lw8k??KWV2{A( zaBKXk%gpTe2K&V?(?Jx@M z9W$wv>ZknVIU@Z;aIX+cNs@a>vsY+vFiQNqO_A-JGASu{*T7$84~SBSFta~dsBKY~ zG)q55lv)n(IEZ;;&PZI`=_IA-r)_5aX!57e?idqM)<{y9AzSeB4?BL{VY^s^!9K(D z{&PcgBz;3+$uh>>=Jhk>Mhee%@(lfv4XFD4*N{Y^Hm?ghy^FCn`SRRC=ULCg$B#$&B?Saj@;mQ57Bc%z zKizMLRQE2|xx!DzPnu1}tRyyW!T+AyK};reaA0uuyuRG+q^0fF3zO40A|Mb8q1EpInEjpB4Z=ZjmM3;=~J+DdEaSEsqKV_FN+o5fPjI>fIP=2z0;g2ww4H!Cu2s3l{y8Ig{CJSQ*LS~@KMuq-d^ea*$RGav9y$?WSZN^_ zkTP5|Q;Umr(66lwqFcVpyuAyM_w$O1EvP~l)kJ_Zy>FlKalCT!04xmqk%B!DYOcSY zaLzuXkeX~W%VSNVpAkMHg?InLX*Um^anIE8ySbrwK|C z4>m6ajpTkZ(uZ$_G;9v*;%FQv_lN-vAvW#?Hnz>ilP68a@sAypIFvHqFXoYzqKIy*xx`~a$M@7c!Pk#9L@yh@uG1)0D-7Km&tgOt?OBRfOG z!bSo=rqfA`)zheWwicThq-w3}eB(}elXs;1w$z~Zk%L`kEoX#<#cP4Ik~lDHZZ=hK z^*nsGKD7**Cvv&?eqp5+)5xg`pd38b_U0^jFiR7`cFhHyg}vX-aqaQ3D_T~xwXw*| z{{7eJP5NP#b!79 z{pGJ;h9Y}5^QB!m^7QglG1j)J-|~ytr^+Mj+8fEoZ=^P=s=wxs-Ia^G3>w39(dj;L zGtlim0@ppO=sXZ@c{8fk`}+EJpjWuEG&v+Pl4Zsp!@Nl6rb(p- z_zZ46QmRrL6HXc~rlFudRo7WyjcfO7gk$K{(j;N>&SI_T>5bmBP3*d+&q=gEjM|Cb z!>vEE;$Bix7}N?Za8V67I(QuQsU!3W%6T6jgH*&P41VhPcb`zFq&)9{E8uTur2GFXH-X?f}3rT z@I!YxJT!?sHYewa>jnC!IMzT6mDowX#{c@druKG`*zMxP@g5#lEk8(cS7zhs(Xr&p z z?Ao;}3D`=NMSf~roTon7{x7q^t|Bgl#99$pCf=3Hl{~ZX?yca6{~caqv@#?#Gyz3=d}S$jUp7hI{4To)rLC+(eS}P z(5b9Fwx*!9EG?$z57T6@y(FC6 zuNh^=haEng3&Bm+fky3Z@a3gUGAyN+{%|@FLl9ACffM{7dhF;?^GhQZSTIC*Ycolb zu%+_IBc4)Al4NNSdAA4uoP*H4Jk#w{CHdL9__prwqz^8A*-GY5W6S!y)D~-(q)=NWr{L_s)faP zR%bqf&(+&k!yk28DE>`U>qzeI;$ES-S^ypCv1Fy(8m*;&sZaJB}+Wu$S z>4nlicDp)KRAcAk@)F9dQ8T#~a{BE;5eg8noKE%avS>w*f}CxqGW8aDWgQ)zFLS*x zSHbZM*V@zVc>+Z#M*)ZoJfp$Q9;2~w!4$}@>8zN-OPRDZQv_b8H)A}`xG@sYA`R~=O;Gb z_$Vm%kqmE{B=fJO#N(jXY4FkE4EBdx*$1)H)O2-gSDTfrf}b)`J4r{bPOl&j0EYGy zk~3*2DsgmVAU+1a&<0KC^54`f^7*~3$os0cct(Xa9k~U6`Sr_}`d^hHCoDSOa`-ir zc~TZ{WqsU{o+>{*vk?_bv4P~v8B+O6Lm?M?Pefuf{dO3~_9FXZ@=;{xf^x2WTYn|+ z@Upfxb5o+$kVSq5TYu$a3VGq+Y01YB{|iS^&LhtSm*nDfSd!f^uISi(m7E+ivE3^Y z?Fd{vJXr5*rl(Fu=e3zV-&c7>m-We5jq1&HG1zc5)_6mi*YP6>*K|Aw?(Pm=I}=%% z#c#{)?5+8(BfVhDrV|c#0cybE;Hj{+UAYLWJp*Ni1DRnQ>|~_UL3kqcSE<*sb2-M> zwjYXW-=`M*+}l!XS3v!r?MbzJ#Di6j4OP=y96qDWT*;E@Bv0e#C!(d^V&ErEY~%W_ z_~8*qL|$M=6rCmev15%ZdE|>0ljfP=JazT+sT!u>Eq3)|CN!~u&ph6F3!Tk~yy_t) z`laV)x=&&vH!4vk=pdr7bYIg@4(Ujbpx#o>Kw0-XMJ3dXf7VfK zbrJ9;|59?oo-f~N`yD)32zW5&`eDLjd0m1HJDsurNV;B$E~*x)ii)&lBzjeNjP??@ zl8lnIr7YpZY@vwlHAX{WC8PFjOpnimsAr72+`H}8kz_uX>z}>2cxPb1UVc)$(AZ<2 zfr#^IVVAe^c(ya<=1X5!=c}4d^T)o=5oE7md}9*&>oht0@ncFMX)U?|XCqE7+pu!a z+Ujk)WaiQq_THc8dp6CdDQeEV;uBSzMCw`Z1GSd}!opW!1N2EubFiK;yeo$o_wG$a zh2)xxHs;$`1bMCOcwl>q*xr7sRnX%@n#9Zcf{^JmCffRE zt6jeQ5_o;p?>h`)6q`3915Ar=Zf#vwOU%l;1lssM2NFsD`yZ~dKIvG>mqaLpU+xkM zAw5&<4W5_S|M^;IP=~WzP%{F{-=>h7ofC6Qa%_f_OTM0pR2=Ip2@ z&_59iW`3)I(2htvj|e;3;lq!maJww}2NVI64_PU;5yyiZ zwz#U-azX7gtSS*Ma*V2~sZ|>CSDFbiQj?^A@0JmlkcdKhfjQQ!19X|=@-s7n@0?Cy z*z}0d*vd`Gu&M+&K-Ph4`ox_OhoG?>sy`v+*QW|DN=x4ZEndDvjO3AlcL3@_GtdpH 
z&!ouco}phY+3TYF%iG)GZx9Tk}`EB1v;k{Vo1r1nOB2;XlL?`Thdfb}Ys(1Vb)GH`Ym~ggboZdgmh}JL9pnl`L34-kF89kLJVzth1@@+^ zjIT^R&YFmH@uqYw7nk(UAv__;%Sx-R=k?S26>~M@&$1W?HvZ@wS$Xv6uh6GYznbKF zt1;&z13>Npz$&(aDcWt!@u_2<-$~;SJAt1QQ9*Px>f;;{HT~ARlNt{eb=6i0ATodq zkP;;(7e>@*X6fG>wR6sQL@^J!)QqW1%}vpe(@0WKMCUd$PeL<%S1`N6$1mgO~|i|9~04+jl)l`wZt5 zDdZ?Pkk5OxnW@XjkRuWSA(?jTUedL6cvNH+6yzMm_8G7T=#Zhf9hi(8P*H&x@cK1> z$yZKJ&M(G-+ld&OowV^!;nmgEx}qm3hU(AH| z-XqP|^8H^vYSgX>ThK8J*(}pI+`Nh7otP+x zuC{#+RL^cyJzeZ#j+Wj=?zs`u!G9Q^^~-wl{P`=4MJLP%{hJcN;)2pX8 zPYn0MJ)M`A=YV-2Ns%rQc*Z{|fb@x2Cnur-8(}PchVwCz;1cdv~ z-MRA`x~mH8^vcwl^h^M;k*Iv^y@b9^y&FW{WYT;HxeAHz>rsrlpI|7cCjb%R7r0t@ zpFjb$(5oEzXcN^|Y=g(Vgz?ydqWtP7eTkM)ckJ$$$kBKJiK5TOCo z_4_NC`!=MyE|B~ICPX5+BE1go80&X&#?J*0piy@0_-=tK2wSjfBSKw(d5|}p(4Zx0 zm;QfD-tqHD;iz6QbZevY{we^j;@W@+`h-t5adjgXqJ-sw8oVX)@iycF5LXFDDwcdj zyqJtkirBKq2;nHBq}byzDh8%NkzGdrQ|qtnjRFAyWv74sEGMtrzT%+p%!s*uN!iFx z?Q2?gaQxg;T9$*!Dvd*S^54+uXMbfyI(JA|auRzE2K% zsuWn7DbMpHubM&4dlzT$-0nS?+}h$zvzH1-rSw1;QUX0YGJ5sX$&~vVHpqjMF;LT! zi+x}z8dJvUbZ!!Bz72cp5VS8d`BjA}=?{Umu32c4qIUzus9YRo4xIy0xtDdl3WfvQ7H&2qQu9*2*8k+IVVCY%J;pNJl@B_7y28D41c{%$|1Q zM z`Ndk~#NDI{StUh9|L%tgUS1TpYk=&LNS=%gwUh%F8jr$4$?=C&Hr*~^V@xbxd?yhn zT)2LN{MnPr>S_XPEAsFa|&O8X!?N^DNa@PEt|gHLstC!hq6aI4x_2De|3G~ zmeuoY!SZDu^Y;@j4?k&LFM1OB{(IU3V^90FNc$fLQ=8A%AAd!xC1&Kgh8{e0Xpc>Z zJc;SS|F&^*O~$9bL_N2#&3UV4|K*Iw&1Y5x+M4+mk=nc(QmsW0RbXYyOIJ_D$i0g_ z$791Ua!=T6BYUo8wisR+kBH_=o)v1z9Xcl+DWcgGEW#+}V#_&VdFq#q2mvz5OJDgP zHj`WNMKTe?kc~%PV>iiR9=!iBijggHwswD+z+Iwarv1%LrP7vN;@mRnG%Q#u{O;-x z3k?nWC4Gg;Cbps*1XP&vuZS5=#0GX*XrL`Cj-Cv(MhqNBA2JuV8FV`N_i2!1eG@&4 zvxc6^dfa?lfXgNAA#w6Ex;T31O@8hN%t?ehqp2{ZFC|#O)b}qVg*X@V1Pm2x99ONw~aMb!6dJlhC z6ZmyImgm`4R`wULaS!Tf-2>5DdCAUB=pIQf#hu4dO#UInggy`4*mMo8Q4;A@V|F9x zGHXQ(aSTk85>kkd{e}xp)F1B+bVjc8?As@UN^~PK+g$W|!Bl(y;>zW*dF|I=7!%F0 zT}-MDb`HK2W*W+ou4mtGa(@XRcU5s3zA63c8}A;D`1x9dvaChl-QB#?xa&OWB$C11 z+&m^_H`d;4{=8%CX~)<&e@(CD;06a1jn`6setxS@cUx5OX%}=^M2gxgF3*iM9&=l~ zQ1e1q1+kN~i~QKut*pLsYp&kpK|PgUzQnMc_@HcJa){;8CKBJL`C7Q>=TnrWft-%i zTRc(Df!lmFH#ZlmkVOehe1Ac$7jW&=6z9uMi~RPE4q$}|lIx3g z$aXL3=tR0sda_ZB;Y)Km@I7B#+WnU*4Qp!iuzhgF_3TSp1ZRXQkVAeN@q#h1JJ^B< z*Qv*P3p&Jkxr}2A5O4=0V;sm&!%wv=8lr&`SyZjHx~+!c$?`uxNE)g~C9&uPrPezK ztFT>xQO_UGHhYbtwWu+yWaS2;p94Zd>hRvJb*^)7cpvT$0KIJxV!o}A*ac>i1m#SH zaQ-v)omM_`6Wx+M22AH(?Af!&kdFx-_E-`W%sAIbr>q9|Tzkc&TlS0n9X?ATQT_VCG*beO4S1~c}upLc!tpI_N zFkM?6O10mVT&Ee)&|f$3x@4jJm`$U?Ndhf+m6CGV)HD^Li`u12F>WgrZcPvbW)7BG zU1}|9fy_-PO6SAFCV=%e$aF+i(loO;??UWAsD^F>JZ{WJsk9*un3y`Qgg8jo@%IiL z+h#Q)(A$zLx%TOxh=^t|@TwUMxiCR%lM@7uY?o$kVISW@8`@I;$&wVwm0mis#~_*h z&2W6-LbV}3L0m*7LHvFEoEN_BtU!}WtdE2PsT;%r@8Ld@9JSBKmjEzIzM>hQ-%I4J zz4t>baZYY7@`w@409Lth;qmy}K3Y8bj8+EBE1mdEAeNI*ktKL?U_TN(K*$hfJa|C0 zihkc#f)jWH5BZ+grN!Ynt&a0pZdXeQjxB50n3BBwL&V1^m~|%pr&<8Daa10Fxk$)p_*z#5DR@LK=*l7?s30{wYmHiaV=AVx)Pr?G30Y-?_H-)g7niRE*-R zYkPBZHvP8Ch51hF0D?}~(xQ$PQiA6U;uK$eNlI-=hmp72`(}_wCjaR)&9EZ%w4(;(x{IN2PcE< zU^Ol1@+vd4RdVH{q|NTdBZm)Ps1y6+oAG|h1o;ZhA)pFc;8QoCH9nI#1OOBpF^76j zptU8M+KElbBf0o6%6VK^O;vTUy0^An<>}(qAHUB{Qq%=`G*Vpin&NaKA|HhA6EjlH zJyyU*Gv_nA(~waU#dWln_fbgGs2zRvqeF@vvHE=!!#=UIlF=V;+Pi=@%5 zP5M7)iYHt$TV1B-e_vG2=V)SNWDJdnNCgdagm^XNEi-+ut9U~=S4T?LQr}Pc-Jbbg zLbWyO<;lS6!U64T4sWyMl#(Nik;bEFV11ok-Xz&XtvSkCVwH@Mb<` z{Agrwg32~=B!NylFW1~MHQhV2M$N4V72d8y$J(>(J|<`rEOK3Hj2?!b-%FG%3#Bn^ zfyVrMmNZ(&y%9zwAzK40rb{a>DJ3J6qK^Jeo7rp4bR3S^0^cMzZfZ7E0t^iHt6e{S z((He& zs)zPhMDZ{4JvO~2VMnzRlcHTUA*9;qhMBW-Z_QLypI|!@1*T#fea|4Jd$rH zuhcPSuICi-qvIXasBCR)W(xIoEup8=7Ge{_kaTe29jJ1h6*OnJ8XkG(5SW z9rf!gb?X}&tu{(Lb$u~D*c@Ih| zKSboh%(NvYTL?3pm#2;*1E)Jv-k!aCS2L_YO+$^IieOwKE-!i7O_&VWB9B%dm{97$ 
zvs>k%NHP9xmU6y!93U$g-n9XrJpbO2MkBuXD+D=QJ=QP1!=gyxW|+Z1nde}I+K=FY)~b* zBBaX&gC2?%lEiQ{g$7r5XIK>m-t2}rYsZa3#Ah92>MANBm`PMvEebNc(ug9e=T1(A z@0YtKhEJ9(`Vo>2sKK3v`oV7%8m|X!W=}B8sT4G8p#afT%F4=8OvTlbL0Vem$|8LKUf#8MTjq> z@>gi_2iwq4a8D7Z*7Jx6E_tM+T~F7Cl-!0_?QiF*FNu0feLBb_#$wLH&21MxE<6!^ zcUnH5A6f)Yx;qvLe^|0Mo2esrY7-wnI-mbk?es4G9~mHpOAHk$P$WVr^S}9DWzQat z?F^$d67aguj~jRs#3w>DAbcLl$yq3c!=#7J5LO$UcmPAHj*bPcGjVJ~_jl={O7w*A zdKM$IO;l}#wj=Ix4~MG|+@!)W5|+j3=R+P<6%}aNB@mIH-g0{@@Xn?Ly1$g{<{@RG zY;KFiApIr(jp(en-8Wc9QJB_60qs4dHqDt<}nITL!Z{8mu7K<|L5YgAA_^cb!jl@NZTkbsa^K1T7 zCz%N?l*yDWGM`jT%cA|mA{CSHy6>9&wax1*GHI|ARtodLU9v@tS)R#HqEB9)BAOO( zUY6&tFI)}#ODcIh5;6IC$VYW({xEtYe4ys7^JoY`i)sPsU`y^#Jomh?BtSig16T!A zK|V6QJ^ywzGHjws3AscBxyJYoSlw}}E@FKVvhZR@;70aW6Z2h$Oxc{D&qIG@p7=TY zQ!~d6J`Y0Erx}4_3Zj6QRj*OVLrO~6CSSQYqN0_WQFuPHcu>9ZpI5d&le|*HXeU7kSJO)WFfkD{E?K;6S?qL=;Ss<+s1S@i@<{?Ze4K z^fKbJj4jl@^x6@CztN_;NyRC)$Y#^76I~Pk0NRm^BWNV)T;e zPS7T6cX}OAh+)`_-AN}RVVV`WrXK_izL)Rc zC*qLX68ClmM=_A!G|h~0)zsicU?97}zWjYiq8lJz%{0oZ;wzNvyj-*Z^yPIfPP-|3H^i4+^K{ z{wZQxDx8d4X>I3l*;Z}0-0h78w3LrG3fi#x!(Rzz9m&@Y9`u0IMb3Ln zoEs$&vZt=)f_~RPOyZyy;8lJo&qzhVcb+I?q52;0D+_W}$=7&22?Ybf>)+ej=v&zD zow|C+AipE`<{u6MvXVHawoxTaNfl0S+2VbP?Fg>y?*3r+t`vYs2NbWaL3bg3^_j_g!;_W8hH zRFWN%3lD<&NM4_YKKiwqAm+w zX;P{F?|}gnt-ct&S=1&B4%wj;XPcW-v zOtaX@Htg26ok!}C!8f-LyR9oy(lTS_icjH1XZZ5J85mwdoP^$z?Y~5!qD{Zk(vA>i z95Js*q}71@O#XmT`n3FdL#FHOVSvy0Ju!1{rtyG0I7C|cw@TJmCs0O-&vaWIq3Arz z;01QiA?(wf;?D$+sO@)e^RCUCHy=83gcAjDUP(zk2p93tQ~d=CjCgXfnes{I57%XL z7U4UxfS_6K&22h?`i$!>bMt2Oi_UaIe^Mt=DN5`rX8q)LkC=iQh~k3H&rc6|8;P4A z77=;wzW*2a48QCLf=Kk#h~LqMtvsN=1;|0y(z&TKW@c%qq8G1&1EhwCXMP>7InHR` zk|21%L_SE+xQvYYV_FB~C!rMR!A7x4P#St5`*sezbI7Y?SJ~8^PAq{XxX7&%HhV@+ z?hrqJu>K}e{bLYCHUdk60R&9?DnKbDw_zKlEDyaAfH&%>cAS((IsavQyXM%BVl7mn z@dW#O1~pJ9B)CZO-(t3lBlqQW*m4htz4RD%D3<#hpc~t^8)4X4j`brmVpJ%^;$bd> zfWp9#a{#?oC&P5?!ZY4YpEt+wI6WjKo0OeDe7v3wUYIe6M%cwkJL&HJOqnV~Jjz>V zzb0|BHd;~-{N5UOH!0@Xx?!vrU*KL#_)bW`C4;x*p7y4&P!o%!0mW4ycMjO7>`deD zmo0!-UYa)uz^dH>o3Sb0UC&G?V*K47#bOAJxzfF4WnkbyS15`=+r>PImikWuhx|Ig zl_kl2@=V~IIQGCZr9nkUCmrO*Va$nXLJG2#*EH6Uu00>3u5{0Rz%I zm=qC@Zfd6{Cwc#LMFzN1hv18)$!QI^#UK(uRn@<3$uRpmwRzxjzF6cDC6w?=HuoeV4b)ls?CIjQMf_RN5FRoeP*DzH@D!qHO`o(%)abW zI>6iI86VjXr9`X<5Iuk(y%htFLG|Dgz%RTHHB>R7zpsoLqz!~ZGMWMo4K9Pv$t2EO zG|GQ_uZbJbv*8x4+gxRySt_ck@lgLGCu4tm%pQE}@QY5EI2jH5s{==V-ln4qHrfqX2EPCQ~Q*d4UqSu_Io={8?QIsZ?FUa;$YQTIUvq{y)! z1q4j+-$vR`U>txCZV2uHz^E}aU$X9!Z)?%^`V5|{2m(&85U7@a`b3-nqVpk#AQ@JS zVfCJHt}fQOsq5-S!5-{jVM)hb!RHwya$_jW!jAo7q0Pqj$J87GLcm!L7!kMb_r)F} zpkWx~2}(XFSRgvO46E|lpCy$@1AvNrLq1yggl?wb7KRXRUq`RS83Ci}z)Hj5W|L-O z+fBSKSsl9_8Xiulj_Vv1p+=NMFH3L%?o@I)Vb+?{MeZeQG16VPteh)H4jnx@HedD{ zNe)Q#zOnZi2zrC7J?mA_jf>~WrT7Rt092t3GroHibl%mAk;k34i^Fsjj^%Zn zo21@s?=F9b&M=qFWhO{eM5I0P*|Winw6szFI*a@w$j?vs`}^A=>8SvVXUr8`+u{z9 z^*Sgr0$uM|MbKUV;VWnD&-#3CThp#K-q;H-W+6Y0PMUd)H2r2!K>P`NyU+jO!y&6R zpdr0F-4m|!qWP$jSP9Ep#5D~501F`I`YtxMiKo9Z00LItB;~fQ9cmRy?B|&}x0#jlgH;BW}N! 
zpCnOtEN2fWVX9x5%%zpF+iup~&J)GsglNP#$X(?8vfedyNg6t~p=*#pCSd5LB)6rc zKOvV05q*oBLG30Ar%NspLzLULbMZkKx%gOWyroY z5GXT{IKm-EzCeuC}FKsV~VZL60W`v|ma|8|mNpUAkGM)2u-!PPXcGPJ+2$ zl1Ix$Arl&(1q{oSqo(4Wio5WIH>|tlnD&ou$(6~q={eoW=|0`@@&N;bqWLK6?#y}N z%>2%g-gk9`)M0&V>&>}kfG{Ne4_8%CrzN=qog_ggmlBijz^sKhpc6F>%{5C)dSrXw z!8(NKBi^Ct)auh}1qcHptMq^WUP4ev2x5KYq{)V}Y`s|eGQg(|-QC9yR(<^%oOAot z7)NYMxS43{)Z-(Wnz@f6&p>vmx6VLC;5S)JQBlX}eXEB?r>|ULBDRi~w|A{WkmYl) z^y*&>6w-W5)E@-u#P)}~Sj`MxCUeiiEJO&;KHG5&ykHVL)RaT4&{wdTBm?hVX@EW+ z#}B6v*KptDAft!cMvV!efVG1p`d@bzeU~!8ow2mD+i5U?c7sz`zxt*o!J*|XTeeUj z5xWL=DPv#|7oYa#jmldHlTy&p(Me~vZu4M&AP|-)^7Zz^&iI4`dELykw7;u>bQ!;{ zE-ybby=2pITDEa{+fUGPc*D!AfjipKw(Y3>#j(%1rLl3pprGJ6*Rw7-xiXknC+cI& z`uh9Ds3EiH@tz)c1ae1o3jZbqKjG2{-DZgGV)|=%vBgL9s&fgbkN>@Q?>5rgJHH=Q zPNR8$Yrra66?PX3<(+rX&g z9Y#L2%q%SCuhWu}&Q33x<@r3|Uf=c;FhtML&{ooc0|$C|H4Vdvx#Ostly!CK ztY9!66e6FjU}j;q7!<{!^BH+o)>;t(pU_z*EyG3B@vaTf*Hla#U60^8o~q|ned6*U z`cY)hoOxJx43eDiI*NO4qxBr?x5v`T*7gk8`wiLfkyhwDiSTV~Y}{0!ZPDU5Xd^2k%4e0`(Xx3NB_(VCzk=^`bhNj}G!Ajj ziiwH!jE~pS?%x;D)X{;KO==r^d+VSZv5wW(J;elsM|*nMS5{Y{@zwkL_kEznkUIW8 z>HrUbgJAFX2Bk*`%Q3N3;egicDby+&Ka!c5SpgVuTtvk%Z;RtNRG5i@w0%e*n3$NP z}Mh20~2OB`u%w7FQdF_b>|Hc z!_ji=%u}VO1?$>6FGe9c-j8YC&?$UtH?{{8VH>`E^8n7T-&JU9RIc^Lhz4xsNrb8# zoScU0wJxs&96P(Z8h-pB#3tid3{eZnMvrh@pF+aK+UmUC-x!YxJ6Bs<%h*1ccs-#gl#yA49P$?5621)UdX zonEAgLjjHWl~~*fF^KZix6H*Tr zmV0Dvq(nyE)g zf@JQVP>>bL(UZZcdT0lJ6Y8mEKi;*ClChG$zjqNOJuoG;owhJ)|5vzwpO&GDrx?fr zkRO$U1}4%sik>-{QiOTA5(jWh zB4N@GqA+dDUS(ln!3ls}lf8O%HwH)QIt_7=O0gXkSZ|tzrMBw1u|W2h`*t((fcri^ zln5LzYiT_$E0e}~iFx@_S}XvdX@`PEV#j~Kn$fgf`r4YSRqDtT_6Apx95{S?n%gjl z;fZ1o46--AGiJqOzA|jxL)a0qwS~Uxk@y2K{sOJ_7~+drEIaFdqfiq#N^3jxjf>=d zK0yW}c)Wdl6h6~XQI(=@oNZL|e0{=qy{lco&UeaONKJ^FpI`Y%>yXU>s0M?x2fm%( z@Y2FU4;mX5u(DtkA9$b9IMmbIOV%@&ekO5A2tRPQv!%rw)@#40=yFE>KB9q^7N5m(DtTiNxAvO-6ijY3%vcj)?Aq5Us`YKdY|63&E(- z-mR@&(D`?V1b%ch+lLPyxT|ZfyvXk8h1@GL!1^CQENsO6C*r_Hxs`jfwvVEa;iV4P17o|-N>~38tsH1bM5ck>q^2eQcdp*8`7pn zek^#WK6=n6Z>FG%ncwH>u$I=+xZ#L!PC{K*!PjdT3AWYqU_ zac#yZ8ptXC6h?b6;w{%wRWGQ zUedDxhJJ}@A85&gX_10`|ayKz77P9LoGNY;WyqFwxzjXQ_ zKrS{@fBM|Ti?Z_aTXAU-QPJaKV$|Umy+Lp+>+WWQu=JJe>zjcdT+BbankPQB0n>K-<4n*O}(z?H?HGT2@+0qL6kE3)|)4={beiGA=ds!kt>|`EwYo z3O!!JWeg1+d@3p`N=&nba22GEPeTw5-a8xg@Z%>D!VL}%(o??Y9$ugFbO51a0U00+ zx8G6*7^-*g-gN$KvFV+b^XDzkNzZ%*noZB7Ht%Vknw}2MDR}+*n{kqWY5DP_nwCcb z%|cp-X{{Y@|FY!G+~NGSS6kTmJ7%U zS;4vaghVfP{kV# z82@0%<50BopEtzNAeJRXPf@0NPciAVv$N3%F#h3T9TpIE z8BW`~E-rXEJdR5C_WALOkQK+t`80q6aqt~zleMvS!G%tb?1~u>tEJim^B4_tR2(8YtfnyS%<+w9Bt0eo9U`-u{J&&U4$L= zW!-0bIkP}`6n6i{4KDPH9Y5(Q29D%2Zkc@=uaK MqbQppqwn#*0N6W!&;S4c diff --git a/doc/source/ceilo-gnocchi-arch.png b/doc/source/ceilo-gnocchi-arch.png deleted file mode 100644 index b513b6232a09a5ff0b41333b248c6fe461464644..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 115795 zcma&O1yoe+7d|?G(jfxUt#nI>w3JFq2-4l%ozkHqAstF1-3(pQ-QCjNao_Q)-@5C6 z<9x`sI5X#*H}-z^v!A_ZLgZy7F;Iz6ArJ`0o7ZCRAdrU!5C}pMG9ox5W!HBD{)eb1 zB`F5Ehy9lZ&5j0VP;6eS*+U>`IIw?kkc1=xa1zPkjf^@%!&>S0%%_r|xbH_{vtw-Axc%t#W0mMh#1b#lRbS0K zBIJ6iWU{b+OU)m}_e&PJkf-sL%~U{(UX0>$W-Nh4UVX1-JU-L(8esho+{c zl-jKvYBW6t-r4nP=O@RyxlD!}D3p}PdLI2U z;qK1s|2T|)?gK`fG|oXxOguC(fdnBSAZStwJ#yhEtDyaO-gB9_g@DJdy&xjF2t`S|bp2Ga-&Q}D5|`9wxyLWK47sLcO+$;7W$AMX+NKZgtk zV(GuTE2bFW%kAya!Hek>h&SUKtzH zLWDDigo`H1-oYMX^7hZRXQ*XXRNz%qRInxX!40&xx2LD2K|X%`_*zyr@}f4Us7TV% zlDR;|yvg1TO7`&KLpnM-VGRw9F1rGi?Qj(8i9&TG2ptPcN2TQqO}$54&qwEZS0tKf z6sJ0Y#mk)?3xWs{$Mdvi2(+Kt&V?vzW*LW?_vHJYW`R#c%}a4s3*@if6hu$Ss9O?3@eg^ z_wnKNr!lkkyW8s}yBfcSEHI(M!a}xh(k_>4agyj|f9$!#@6U?$nK;#v24RuM!Wq1K 
z_ZSroEq!A9b8<39-5e(PiT)LEb>#+`pZ8PA$3wy*3$MPC4=hnrQ=?;I64THi;UvPC zwc^oW!OzRjADWs%shf+uy*^**<5@sQ#%E+?92y@FEGQ`8Ey1F1o%HjAUtM3n8cm$D z>YOSw9-5g68y{C85%eIy!^5-ifo&%C)?p{Pq=N$|IwHdG@Nm`%l(6eDHug|>7P>wf zDyqDS%KEW~ogK%Q=x8v=2+NturNMO3&7GZqDT|H}9NJ?0jsDBr7QvU_54aI zDj5w8T4y9F;gk>~*g_I^04r@V;mm7b;o-qwTU!Gce}x(v_r%xFR9YsE%*4ZNq5KJp zMzDC;6H01o{q6;KviwwV)`($f1P_^181>h$UpYBB!eG51){pSb;{B&2q+dPMX-lgq zD3Bia-{j?g4-5zn;_y;)knPDqq-ep8vZlIvkHl-(9C0EAV2EPU;kJ?N41o|u7`3sf z1c!x*s;Lon_w>NIh;kXSO0u{4N@V(K6$JrP_4E{&wW{(cvY}~(uV496SyA!J5KiZG zYfOap*O9k11emIzNWN^gsFjt~;JKn#|7T$DJzAxA+iYxX5SV{|O0SU3Mx&dss>&=Z zWY&2M_N1WLEoyq#DZbAr^{TJCn`lH#PjAlGT_CfSa!V$2DEwl2b=BbMvuEj5RXnTP zV2h`xr#&h)!Q!hFX^}e2Tuj(|dO~mbfe+|krKeMl!+cpLEGvtK@M+-ISPl&zAMwj%Sxn7E zyBY%<1CDRcBg*TCOxlhIgvig@)Vkl)IJ}(8>2L;%^GaQvSn&2ladP(z+kbW> zTP~{FI)&ergW@}4y~riFps03$VY{ZGVLsc6MXNp-tQC9hgXcVneK~n~ z@Q|dJjtn|=<6~nFnMS6kL-X?T(zb7+?k}S9u18*{RumV@JHkUh(=1EL%65Re3M~st z1B1a0*}Km6zRLD~C&=b~=fR{^PXK1Dy9?HDsNB@V zFW)P5b#<8c<>b(Da&xz=*}-rO?3K$-NvBTLjP3R~v&Y3!kWJm3YK`3wxf+%@v^wYg zSm;B?m!#uG+EDs#Y?i1X~ig%pZ}GGRNWVlaiB10aS2sb{5SZS^7nr z8X%kP-?Qd7vm3kJ;C1VkdOmK#S#7t53P~#|!M3U*1~F1MD8Z*HF4rZssPT@7C;~A` z;EVv~#EFSvL1b>oKF$SW<`X~R4iFP0GO?$orUti9FM71^;HzdZs%9dvgl^ay zCL6sQHMGwvyQq!)JXyy=wkPvxIO^uyjje3<;vjun0K>Aa`)R|ogQBnT^`^(Eqm!n^_ zhf6`^%j1RWf-@}(+eM9$;H@IrBf34Y%!1cXe0)j_WN2_O$puM@`#~(2o0|iRX<}gk zR^#~F;HyhDxP5{;a3{f300wwaC=?D_tL=4ZNZ7#;hm&6BB9}SzB5M~GrZ};mFsb^C zj*ilMYh$Xx&OHB8YMd;aO?-L;CdL8T-q}Il454+qxs*FnQdOm+b;>I&6xY>Fmm~1F zIMA)x0mc#)LCk|#aGow2q*L)hL0Nh2IZZBwTl*F3zFm#Hvhu^SDfIj?qCqs3qKVAv zYE`eF01QXR$GhM3m6nx#j)}qJsOKh%U?Yl{tg2y)@FYs&#hWu>{g9iTU$OlYB2oj39Eezto~OtEu2f|t@5_3Mb*sO zw*TqknKzV9vG6W5EbMS3^$r)TqYFTUHK!BppXKGF#-6Z1dkzNz)mZ-f9>YSFe2Ydi zOR;ixU^ZpO1Azi0D1Xpr@w*mmrB`HBuxgi=mwnjSSXeMY+_9@40&nZy_$s2PtE($K z_`tIAVjAZ7eAtjesdOo>MaLi!q%4A7`t+N1BDe7t8#yJZ7sdB zlG`LFZ{)Whi?+Nxh5ya?__z;Q<>~3E-91XSU>h4^x|{3aXZL{fA+@ae{vEN3E(kjC|wf7UG%N*Dj-Zs zbch#`>WqFgKaP%$ipQyySVuyhTDimQkAz$JWSHxlG@z>lj>sC?+EHM0L5vC%xDhFy z@5)?SC_4cQse6u0%DncX2yjBW)(((|3Rszwo#yPTG;S^eeY1udD<&3$JDd!9_tuB@jenrT6b!*X(ll z++QYJwFzsh8D}a32<@~{mUW|npSE%Dlx?d7GeYI#R1O>mK%Om^8u-iLmAN@%uThzY z7J9CNx_bW0-^71lM4tlYHP7E^X{8K=A7^41h_iNn|Nfo*V@9{|gpkMBZ-*Clt6g8m zJ3+c<=n2A20NzgqYtnP9_oBRDlpdZUB5-#0_JF#Os$h+UY#!#2Jg)?i85lc2LEyba zr(W`EYV_wHYinzZV}JGa#X!UO`1la!%ZiI_-ifw>32i#O1UuJa!3|tGBRBUAV!=|w zxvD!ZV^un!F8~iaZq#mq#vTb+iC+VXfyzq*vm}M52^;sq&dxjxjEoBJ-vAH2QHfhQMm^8BH{NeX{`|ufXWwTcA3k1a$5JfvCP9I08DWpru77FW0?<;kpVHFy3huGQ^*`j15xnQ5%3-v85xmOoT{LW_4V$~80b6E z2ud*7(4!-}n~P4N4uA}_-&sJAG^{@B=i3iZ#_x7Scdmvv93(PR4b(~1CiDp+a#sqc zH>>y{dbS!K78Z^3B_th9LUbzek54_UrMgiXSJ%);y75hGXb?2uRaRDpA%19RXa`9* z?6IB;1{}X?Nh}96O75s8F)I7ozuTDgd?XUO05KHyR=`4NwX3Wb1iya$x(ISZk+_w% zKLIVS?@v-ZBps8K#Dt*yR|0k&VE7hH48&=0Bqk@`L>w3* ze^OXA|893`!>(p^5dN+wDvnMn{E&gh^&QAL8C#_yBvx>k7%$~?(sn!vnS#qc6uN+f z#n0+Fwgw_vByis~C-iJDu<5H;VPLjpSa`U&U8i8roCnFifv3~TCVe_hh>2OlAz|3o z4!7Kv3;%|yqk;1oc4tF;QW{nl+lQJXK0a?d|Q?8mQ+-0P9?C z56j!nokd)V?EFK6>0boGa{N9MVje4yd?um40U4(4*^XMOEo~vl$l2h3{P;l~$zxVE zQ{sMZh5O_Q2zY3KFn}->L#YSgT3D{Z9|=qEd~eRCMuU!(HE77>EA%~ZFW};g{=J{! 
zmr6#(jg7N+(p{I^P?BEQz8e21ktqVCf|q=JhMFKM@{;Ah11S)U z=sA?y-rmN>!+VAx(=@}UbP|cTGIE7fWoT%4@mtQD!C#*pvk;I=5I52JqrZG+^@sWbqmcrhkcR=G&i8DN-m zbDZIP;Ei9s0RUttQ1$lVhbG=rkhNrF2eF#{ZGVSwWK;GL~y?jN3FhTZ+4`RQg zCXM;oMQJGqz%GEtJ%_sfrx&BY9q5>ul?&0Z&*08G+_)0^09yoS#9du^LB6}Sfun2a z=*R_d0W7yiikz&h1W4QpGQnRy^YwajEf@qV%ggc_8cac?AiY9GmKJ4JS5WW;#FjVd zBMAGgG&#GXAOtCCR)I%@bmrHhrscw~?(Q!|#8`&G^k)+#kD7~--vcvhonC0afC7gB zhLM-5yo1*5$pi=@kUU6AB7kbqy+j8TfZ+8nbWxov3W7GbSB|bT?|?uxUZB?c0fsSz znE;BwW`Cpnv90Ho zxzol^41>w)mQ_3|ieDxTcmkLt6vKb=B=Y46u7K`XhZ3IS6WREp0zO0NwnJ^2h{uam zIZJPR9P^LU#w}A-euc-cuj{7m^t1_bCw_h)MsdD>3fpVxh=_>I?QMj`UFpxCKeHR| z_x%w^*KzOv7D?Arexn=k=Hdfi{OcIPT_X5Ti{s4Zzq?ooj*JX!))D^jVft$W&;Mwx z35tJ9D>A?3AHoMQ{U;D6Q2j+o?L&u5R(JM5m<4>!5+>tHUN$u~L2B#jbW2p(I5`78 zqT!eRr`OVDFnuUT&VOH&a&(TdNeNWVu<&qnkP4##yj0QL1Td2677ena$P1$XPgNAf zP&O+~^vU-hKXeb5+i{L)qWF)x0*YSzzpA@H0gbHaMIv*7j8K|TBm)1&=>0#4!2hGZ z8hpc{(?M6GB1+xcvm)VhLLcBiMh8%;O8IBn;Mdc+0S1bM7a*$ygaSD*Fwo}e#1x`K z2csWftX{{73yVN)ec^e0ts)+FhsbrOg%kfy=GNB_5%Nc=vQ!2fsdogRf0rv&x?o_lA%HA+wN(g7I( z0ihtA7ih3U1OdbysOW}&OXgkL78Xoy?(T=v=0)sZlSKiHL=2m>s`{q?qt>EWT3Wt- z{krMA@!m7A)E*r{Ekl98oPd$->)#rD2FMOhPEH^YU^iXzPLUx%s7|TTLSbfR28?NC zdI^Y12ffb+D7d*}-IW7~tO@dQNl7DGT3*4-Vy4Pk5-<=(n5qX3x|ZaZj;KI)29HE` z*#n??0TBFfSc-KC2-e1i;%tbekh>-FJ2-96)3NHF%VsPupF;I2yP{BG9K!#Z*m|U5 zwk*&MRzhe~nudQ!`8Ti#3X<{i@}e7369h3qXr6{YtWPHuPx*zc4~MA@%jvoP(otjAy7$+m#J_worD(Dt`~E(gj}|XJ-jMX@$l1W7{l?z=+sqJgMrwD0CDdI zA}H(-|1=a(@(NrJ5@#3&)q^?!3DxtK;-G@m>U&&)Ja2X^j*&>EXwnYU9s0j*1^3(4 z)ztyeG9nqm@c<1U92;v7X`p%pyb_Np+z}*=R#p-K%3_(+mXEFZZ+fJpr2#%Y2S!Km z?}LK@%_0F34XGmm8UziWr46=WAP8`LjbPiEfAf9$ed84#xB_TI5#)CVL3u^4`_S{j zp&|CI(l{nHKMZ2-0FS=kBiXzTaDUBZhzaC*fhj3uy{SUpy@tW^XAAeY3ouL3-Jge& zG*G|;LH^w#D0BqgtmOiHKRi5Kaf@PBucYAOQa%%vi(d?X56B1KeU6Rg{=NS>At4k1g9`Uwk5>qIgyV915G0f|*slVc5SI6a zsK@Ep`-}BNYn%Jq@Drowk$-aJzqU@6D%^89gqCKlf8z3ZBeSuQysxhh_FmPcG6Jxa ze;*mPz}xh{HJNjY(aro>Xe0VFYgiEda}Z@p#j#ullmLej6!{HGEG0HeEhz4di9FWJ z>wSqAeb!$7AQ;P@aH0LlWdFG;DTxFu6FER5WXK{IDFSeF53MrWl@9he)^0o?I?~FZP9tq9#AxS#LEM8(9$Uey~h(9mRuv|g&@G#S zK`|J2Uc)*}GF6-cf*Z`6l;fhJq9i>$_<=pOMEqm?G2vS#Rk6~^Pi3?s#aP|eQ!ub)&~$#47f99hs>Y71d?hx zK%$52%IQq4s(2k|bUkeU#4M9N7?h%}U;r)f zZ^y>&vjjQ4glWP5jED%tEUUEK+z>o$UD!C9O+^122j>TL18N*_Yj#wg|DMaq16DCn`LnISm7g-x`?0S1xHEVGJ;VI}swin8$IKn_jZ!#+TczXp4hamM} zvJW@3nl48n_dLN)aal&0wBR1!;QacAU!qt#mUo1;`}*pt#mf`G&tn`MSY5KhuoPMX zUgDW1#?Cc2Gi!-qd|wQ+bLIhIZVvWO$q0Z50DTkhY#ZEDVP~!`&M>k}x@@ZVDk+O2 zq!Ss$c;hSa%e9`kR{}4c z`{OxyeyWCl6(6#&hW%M`CLt!aq))*|TWQCc@gyto_NY0&{lb3#J*D<>A!F#DduLhM zqR;R~VZ$y+(Qotnn0X~#)i9Rk9pHAgu8skS#`COR-)3uwSlOR-m;md5{gJtQ<QSD(8c-> z%-(#8j}QL(6_=BX3)BIS_#3U}UNWhbirwAgYkTS3y#W3T+KuG9`~8wY9bgOyb_u6QpjoJOJ9O;axB2;;C~7NKZ+pD30GFzgCxe%j zl@$vZ63{?iX=|r$9fyRR@2!Jurq%O&ZsRxs>}MGjHQ)zS33K?FRdqwo#GCrckFcY# z#YVc1Pc=Q<>xk#7d>&Gwty*h)?syGwjEE3bf!65(| z&ccrEABG>~0kbP6`=cHvdiujlZ%{es%oi`ZKvDV8Nx8v?Qh-hcvlgPhFu7MJp{Y76 zM`HX5M-*Vob|Yve<6>fLBJ2p`rovezH&MD^%r|5nFoRCf|_6Xs_LHwl=16?Vz?<18&@=-}b=nrG?E)w@+y@B7WR4c(14# zK*D&OEvvC>*SC3Eu9g4?$qgj2;?#4e2HTLJ1KY3PE9hMQ@vUpYOIXm#G)Kg}^&zBb-GA)JRx<8kgt68`NP+8~@n9Bl zb@Xr&uf!+`L##KkvvL5Q4#Ui_K?PxU>yu)Z=IKg1DA7QDs7RHfV60*}lE0O4*IHKC zw6T&^bnlG}+p4vZe>^-PAz?A*=~4qZ!MEaE+57=SM-1v24vd7-q9TItRnU8ouAlnROFlX4B-kH+0zTuVaX;b$5z=Q`O(O6&-nm5a~WBDRT? 
zHBxCz6ie1?QbiiOU(j!_I85riRQc)lGco7*<)4{k!e#=VRu^_&EiU};`6+e1tD+TzKtlm zAZq1E7BrUk4anK8h>+lkEFH%ovZp4BpVMS~M~3{t{@e{NI!wyjyAX3XdN|3>FX@(X zUZ0%MqCV3~%$PP;LU&x*EY&edBbN=rh+}kauFltc=k~4Pa#&IS)a!Uw;bVpy$b;qb&(hRtkYRuG1$OBMWp8F(QA{dH7==~>JP~n zKhXs|ur%v3S31-Wx)|zpC?G+ZRywPvp;89gCCGxR(J0(6$g%sZj#VFW;BgS$pnzrC zpKIV&TCjtUD3~rD&@TWXi_aldYNTS=z5)iOvhru#$Xu8|`i6I&Yias%;{or%;ojRx zn?dP35W@oIBDfZMju!=1MYU+G=a*=UCjR|ed24h6-299p64Dj5nZyN%5dRw$oX^yR z6Rm`$E%;v+jmb?8J_K85u}gj-U74w^mR6e@=0tJA*pZ9diF%k2Kk4FXUg^>|cu*=D z%APKs_)c9m;`9R)y|wK+a+J|*a5tdv79n-bkC`!&>ozjQM?bvuHeY~ynEjbVw9jbo z{=-|3?v&%dmDW+Rh$o<*b z+12&@_hjdrnR}B3xhXRL-34Htvqf+hH-+O)$(Y0zu~-IJ29UubCgi#~uw})ZT1{y) zArCAW&@U4u_}4$0#VM|-zh`aOyCCg%lvq)iduK6M8xvvqfN^}^=k$YsTVRn0sb*~2 z%#e3o*ovvy`Z@ehtXAHM2!}|*U9UCo2IPtXxl#A1Gm^FYhH>-L(&WHeCxYL6qQ$#S zFuKvq_UQ+2r}vpj(~NoLVQ;>mc2aaWkM)c_kg@bwlI!&pl^>IWgOxI2x9UXbbak#ilfXO*wu-gH^_ zw5H?%E9;uRNt(BYD1Pv-x1%(7ru?_4C;8!Q4f0FJ58sp{G!M&DyI+T>U$mRQL+7<4 zfWI%HCe?g$8E>Kplx+I~Q!=Y;Gn_iGMkf5nW`krQeWBbe4L%G}Tz8!WhS%@inP$&r zxK^90^Qm;|=ohX(YJ1~4w9wr|`@$tzWcbUZIY zOb#}`ydqVFUj9HqV|rELedu)=Iv;e$guw+Kl(*$5l1GVTo^t(dR_Jy{apm{>)rIbK zp;r}&>&KD^-OA=XeDf|?*1O*0#!vubtJXqn{0415(^7d=FBRMWmshw_YgMZhFn->6 zwq4;iM*bjSfE=TCPG9M#GT1IKLHz5n*ElPAn*O1lTDuzT3Fj&^*_|U#5a(p;wT}a@ zDx5xxrpb9ekLXY%?%qDAKoNc_cLif*wb2xfa*bdV~{L z;l)j}&V{?1s-v(NCsP_o7i-?3?iqoc!pRv4(Po^b(r)!*rW@?egw078!dHKsJfVJR z_|Ohut4D+IILSOslx|p@5iIvj2#=te^g+OLAjeuCiG zbbU2Z2m)5snGms^AFV=Q&Px2`g^#0(6>&m_eMW}!hKaEg)_C+xqd;D=l={#55HhLf zFus4n3puGnI@JKbtS&t;c|!rvirZbxA9qCurNM zb{a_JiUMz9mTzuriK!J?;m^I^d>!aVhEN-e`{DGvB3;J~l;$L?Mg>+6* zrCAIweg=zF;lLu2poO3}E=p)fTMO?*F6p*7-(#p_`y53&O$XkJlqA3pFzW}Fc^W*n zKjmj=Z)d_($A=7k`-Tf#_M2?;V|uda_HY$aGDoNVKJY0wSZ@FmGDj+5W_iDd>PALR zv?+OfnMvTl=NTwPO_fn&3g^Qag*23#`&vp0%_YUxA^^zPB!a)|eVb=Q;~RHOd&8s> zqE821)|JP?w1(;=Gy<$9-Fr9m#%5M_@ydBLe14=OhQ$eT|1N9ct7RH2WNHFF?7;#r z)Nqt_L$aSLwDo*GwRA$%=#FMp$hoOXGEI-;Nyaz3a1gFuRh8=WisHvd?#Er1-|vj( zhn;mnlzMfT8(xR9zPZ1NmV~&tBL|Koil4X$c;^rZ#2T55VCc#%uQ^N_Tx-^I;)o;V zdrs%*CqLZ4!b6gdH!#iltWD&9s^@t=Dd>Q?U}p*_hF6Tj&l$igy4g`ApD#_Wb!#4Z zPVk>UPu(r8HNh%)FXIB4b5z`20mErpomL{mU7w2cw#;-`@LG5+63i zX}OD39_t86OfvP^K_N|UeKS8}I{h1OFRc;aHEnx|9r$!jbH1Hy<5gCLRJfSZSY2Uo zAvA3S)i=6lQtG5C?8PxVX0IJ`CfUz*wpza1BXi~ z#!@dM+{DlH>9G6DJ|ojJ6wUV1fv==!Cim&nes(t&701yg*>@9|%f}D+qOb%v+Ac-g z=gIxIWOc!$?&w~D9ON0oMUjHS>*>;mSo~+IO~>_Blr4_V&Y5*Rq1M*Ut;eTx!8hT0 z=(H8yqO4k#3Xl{?&%oU4u_YZx-s`0j+ zyH0JcGz)lAy+ySsu8?Y$Gc&OdJh;+UTa{^Ar`|JynyJP(JH$4 z&=L8zWuB+U+B_B&@fqU$S*{FE7wdp*kc=0%ZN7Isw>TH)ljxQ%iZ?^YwgqAXPa&!e zKgGC4hl)75&mA4q{N}9^^tu!@t=HmzME*SE9a{$p*Wko4eFJn(z;gSM8PjP=G3RL4 zt^4jz>O9J#BmU8M&I~)1KP3n|0IA8G?s0CvBJCm}a~``#dV`$@CItFxhREWI7(tIF zLq`^R`|sj(=n_1{dS9o)DcY>-&~&4DI^sx!Ec4B$PM)E1B=vJ0z*jrf@|gw_G^^<-lvT6xqE}Jh*#g&$RgR z!xUzFl+7!tInyc3v1zGrn&u7LHC~VJT6cjreXrS9UEC`fBc1)NvTxWWs!D}VZ@%We zu)fsh3uZ4r%@2p0X}L~LhX?qI*~rQ#@(5eO1e8n4Qtqp*ngM;$7oB2H?QHuPV-@fQ zNdsOe*Y5Z7fMF}0V{v&RE zt7YVf`dtg*XMt!(vxVA*ZD}*EtV(L8=+pj|?N^UYMxM8(5=gVl+U1a)gN1SNb|HAW zU2&BrZv29b2r zBx$9jclc^+mP;Zwc01iy3EeYW#2TgEPHPw2SK1NYlO6bNz1Y(w<5MYEpxif8(Bcc$ z>IFWkM9VaiMJ%sj8^X8u!qLGYtqxEjPtVQ0lv;8{AXAmN9O^NtSEA?U<_1SZtaVUi zyaBQ(Rmvt_ou1~KCgzZgiJsS#5l5H5FbzaCFU;bMGK?=`+1IY4+q-Wbmul>$^L_xhTWAv7o2ubprPlKfPIjt350FrdS#EEZHn|?#1AyC9 z`u2id!|ZzI54nF)utbtAX(rV??U{4#G;4x&HNPB$!ETh#L&1d^^^CZlIVWO(*bJJC zsV;1-!EUq*h!pfR~;HKX=)O`zAd8x`z%UB$)_q7!P8rB=6%e7E+)V?QoGf#=m(B^+Dc zt!jDV9Yxg#*-H6n)W9CMLbBxdGV`ptnf1>c`6-)Tw?&DOq`Em_&-A%tmdH%HLhuP2HB#fA2M>c`bHuJ%s|y9eQudgB|vC#^WIO-AC5ll1awE4#p*A^}HwqSBmrVQN-^7zJ6ZwlS7j|KB 
z97jKQ_Z}pEHnS#iluITH=XPshd6We&I$Tp=!~$)hKAU{EM+qIO7LWtIymz7+NQ}ek;5l?G^3D)iSyo;D{?ZX5}(2A5m92!hR0@SA9nGpL|TAck#-L~Z`8bYd5T! zpwMhaexw-b2=(WqSD#i*cgI%(xzIy3CJoAz5Pvpx{9Vq z!Gw*5vhX0=pSV4NM0&J0l`(lQWmg?gvuW>Kis;)8XqLyVx0P`Kd=O%~`v1f)0Vq>l zHt@KcJve4}1Tekds=N^EV3V!|^jik~CQ_aB{vHeBEG7-(F z%do>hk=H3BVaFXgiUOUM7x}x#S?M@$GR}qMey^w3uHCnKPYOVMKzT{`@@Va~wDf96 z0`(sJCaEzYRv8gJIw&t_zh>v}alY$HZx!xi`K~ zN8=AN&dDIFxVtFk1+MHJ*ROXaMuBjxAs75o$%^%sfg0c$(B&X7w@jYcTH9Ty4>MT@ zOl#S&dN8Y-J0(pfCcQYb$L}~O^iLEM zInx*%9et44S9i}+O@48$e7=&7l;xaE7Ek`MQQJ`{lGDq7AejtSH-vpGOv~f2-Bhy( zeDVYqAI|a^n3jLy3>%Bw?V%oBD1{xmeB$@$9E6A5zv0KEP+PMO)qp8BaUsRo(8E_< zY2TrTegknozlkMUeS(g>n)}H;K93`&=;q78@8J8$V%%g1vz;KcIrwl@%!C~$lB@Ye zEmLV+L}t(O^Ks3bl}JaD=$agI#l4SW-GsSg4L)Iygvp=2$o?ip*r4&#!?uka4P>LK zGqEu^$HAT*bqmw4$~)b!?_JIaOLkuC%YGm-I~cz+J{%Bo6(d7?T%LQBL};esHZ;OA zD=&WO`js-_10O!+h)EGG42$c5Am-v+-T6JCME0x1NH!23O6Qo)6MeX6UE$rB$iuHZ zW3HX5H{x~ROmjjSzwRz+QOw|s*Gv(8k4OKj?RnhTYg@sWt{|J-t{(Llx*dS^mwpOuurtk1Jbmi5r&C00* z_5xZl`PmHQ#>ZK@be~-h-vnzN;#kZbD#FAn4Ccu=+bSF&KIQN#F?llqN%g_WJ7ud@ zolcvPlA}+Z&O<6U+ABhweni{K(F1~b794_!Kc+U2`woCxW%|?{Bo`K)dvMwZIGxemyT2GR|ABh zkSgq^A4@NDTA+}tFP;7it;7e@HP^iaXG^=sVcxgzllYuj7G1%oE$r{mOhFd>cg`G1 zybF?#2Po6H=$I#&$9catUh&@7AUr>^CphA1H8PD&1-}Bh1#s9)Q0!FBHYYENI|7m{ z|E++aM{eW3`6`N^qaX{v?l1{+rxFj%kQ+bKGS18&dtcE5xpxG}4)jB-`)Yxper4_E zzD=N=6aST5^Bo4SoreYp!AY3Ju|*5MK<<1&Dj*0a3o?x0p{EoQ(Tn!#u0XBJZN*7` zVO8D=C=YY1?iZ5mN;HoSuI=(=ltT1*i^z<$;xj*5xD$fKF zU9Ae_w7r{@XfEN%bN&6~3>|$-$8CSHH46XfW?wH}kB_BubnYeYt&vKwvIj?#xA}dU zYD0q<;^VvPt!YiXl@I$s-Z`ow|0MEtQc@9vH#j5$%jEv<`a1NKPsg;{2MAL`z(r&toc z?3oEr)B*Ona~wZ1lu48#@h2xL@i~t2`k`=_=2defzJEaYZJ~A12Zy@{$%MRNT~TZ0 za>1+!5NzWMB6igICdTZ|XM}8+7$@!s9XWL0jofIraiRm%h|F4Qy~2SDgBTBRjpDiY zNbs)K?A(};$6eUaQ{l!^P`hb8ASgaHvCe4**1Mp6DH`RiE}wD_w@<2{F@H1TXOazA zB^VyM=-qnSuF!Mu=z&kjKmry9vX6~}+QGV%+^?mNZI#L?#Mv2DZcodrKp9?)3zE#f zfJY8r;C*=K3EF)O3=9sIbsq;8m!SE~tu&`iKS;}r$5kQ>HsbfNt}!${*NXnkt%sIa zKEP*#zv<+1e`oe+a}O`deRV+_L%ew%x$$mv%b2phm5k}5v$Q&V#zMmPuOQck%BSR}w*D@@)HrZ&6n+nbU_1Yjf;4muf32rQ94*!P@US`>P`ZJN zI#h7>%z58-Y=#|RmjUaGA+-5>sX4Lf;`3^f5thlqK|mZl+>YoQU*vfA_@Vmy724K(*t(qZ-tOBeTR_~K*Cw>&E#t0~B?#}_Xtxx@+3QP%G3hDJ zrU2c0&Go(e7}P`vOZyt!3pDO*GL3*Py$eeCu&PgV3KdrmR;<(#YvP>2yy}Z9D2YML5glS@yrr*W6UdG5yI}}u` zwrJ-V5FUQAlPjbU>IX#t{BpwtUQ5C2;fwwHG{&P7kuc==sIJM`GfwES8&uqfv1LY( z*573+Qm9=N~HTdeX2mZ)}t!dISzIZQvH{u0q6L~ju@=`QiFvuy^AbHzohWX=mfP{- zbO-VmiAJJ3!t!}hmbHLaR5ps^QAY$gTh&0-X$C0{h5H<%welF&KyaZ-ReGd- zW^Vp#>{&9ON$xjPnzoU}!KyW0G3+|t5emJS zLf9L_Hq(t>hH!wMicF}LW7y_b48+G$#`XcOI(;Gy#B{b7gHaDGL*+D0M=(m~f+wU6 z>J(!47g*XaQ$z+&DywXMDgaeHU4|#1q7Wv}gK^KMw%x6J&p*1BleX{!A2%A7_XmKS zx&#lNGI`kH?s9*l03yQC&60Pd8W1%cH%Sgtz*2(9v;soN2VX71eAsAHkrzyL^`Fgu7R|%+C@@-0g^Z>Kd}nz`kxC*Xciy zkzpfwX%KvHI)sa*hYvA*qwrb-`6&&ZXm1X$a!@y9LZ}{3x3F*N_WH`0?1b01@dRo7 zv)v{F<*$|I;T*cDFef9f!MqXXdIJT{^wn zliX)64uP|;`pzUQtjkv1FYZr!ZwRZrtT{cIgP1_~crv!=lUe!EDd|%}pY4OQ-g=yl zZN-YFQYEb6FPWb{uVZMN-qgDLVkVw9P6!?61eV`@;V0uK!~19$KNH<4qw4-pUf`zc zgH{(=sTj5}x_GgN=9|G8+*@~8=@)iHyhrlcCc}COIc3+uBa=C#Skc|LK`{MbjWt*^ zzU|2H{h8~%t9beVipD--?Jk+a>FYtvEbnsmv-bAcT zpBm}YstSIjOGNg?=yiP?!a0C_Ko69ed!3M)kRTu0#WWa1b`aR-=ZgrjBq@~{t)Rvi zFG(Nc_|)&iU+h7bFuw|z+;)D4A}nql2o2eDqmhj3P=W5jPzVV2X%g&Z_CNp%8^Iv3 z<6DxKZehbNQ-^&z3p`=e^iOL^ho*}=labs?Z8@BYifq>!ew!ffe)Ex}Pk2yfU?;yJ zgb$~k^M`oHhpj zM=QIU->x+&s3RyXzGFj;;nIG4BQU4B97t|McW(&maa7pjN%=sx47Nm~6*7S+cLuA3 zwR7e^_h6@toEX2vBN@hl3FpJzT`PwL#3#L}ov4(0?wM-6nET9Ish0wG^7Tj$88bhb zKf4h{q5#?GeKT>dT^us`zl}Ro@Q@p_d99MSSnck`OGO!<&Na^h zlk>qge<63JeQ-S|Iqyvs4@KR7U@fkou*1w}@&BVnr1#^m2C46*rH?)Lir6>r5ZUE! 
zF#a_5fNvnEMr&FTktkn$Z z+zID@ExjfUVoD1JEhd<-9uOF#zf$htJ*)v0dyv-7!hu$_MRq^f<0N)jeDcY#*hspp zE?I@=oz+-5x#!!_mx*uB zh;TjkLm!^tVzmV7tLKgG1tjD7G*%v<`3yA4fySi__PV9RuEwf6A6=#hZ9TvJ!H3=k zF@J`l<-P*?lR$FEVWV&sNd4a>Qw*Sk4rHKohzA!B;^y)%_?SZ+WpG4mSm$&d|6G~? z6aK7dArxbu5N_Y@TXq3icy$G~Z-0+p9oD4_zQRR{{jkali-G3&t+bBjNL#Q1`lqYh z4E!?2@lFglfMiUsy0#=*n#>ltPA9R zqKL@{ya>vuAh&`*c88^b4Ab&KP$Uucw%hMh5$H&g^y3znt=2cKrl2Ja{X?iJbU4Vs z&ShsJ0on}lN%!?~-LAO5!T=2x72Xg~cM;5z0owDiyU2t&cPPkJ3*JVF_B*?r3iC@G z5HthNcSyib4cNpJYVKGIap7DO`O|^c$1w-pc_xLkG9XZZz~^HIxtQ>72VZUg3rSb} z1E2$m8AuTQOFp6}Za|{bI~sDgK%($I$Fd@1&U+9AYucFKs(5h0QEB)O3;7Ui41F;BuZWITx}ka z6Qgv3+xF|&bK}ubd{9cH62gF3ifskjMK8W@JYlIk(be)WxE&pP=S<|o^HgEJ(j3&3 zz`DU7oWY|#sibocB6L;;u~kiwXVYoa}UU5iGsZeB90@?1@IIgy9$C(DWJ%D5(ei0Vw#?S znDuMd2o}Jd@I-fyRiPUajHmv{#|8lX%bHK?ARWpt`R3+Xt5bez7di$w^5kE9Y4{%a z9v~xS(8HEo-t#%hFxGj@?B8j;#CCYrou=0ICDUW?U#*;FJI3nr73K zEhSqsD*MOoC911!6(F=B5GUQd#G;R9}!!DlqH&uphi(c ztT^ss!dzhzq`PS@%YPgs-(8yngn@7UyN%%>WzM8hJo7k}N1p~m%7y)~=N{=vr{tjN zmk3bd(Ek$TOR-)(KjHZ~fLf^c{?7yQo6rU0aIwBHwjL!K_axS#r_Dgjb%m>>6!sp` z)|$}hmHg9ZuVY6I+CbVn5KtuHV1GvCqLBk3F>vUAFL>-vaPHpeJO#VI6B4Li=y%T? z@;;B{iP$4eW54hvUIHqShKK+okc%;DF!|Vq@<&53=0(VLI0?>65T^mwBDY~q6os{g z!IZX2^Rofw@!cH|Vs*`ZwdMrDs_)lcY9vM=EsbIecR5a9rtkvUAJMZJn!j5BsE@(R zBnVU~gQI!oP+|mbY@b`*iaZ|q=sSAfdwmamMWnWpwMFCO#i=b_;1;tJF;;e6G8|uyZ zJ!vY;P&f$`2P&Q}DH5!9jF( z#Z%4W2c922?+bg0L_fa&J+4*MU?Z^O7C)NU5Wg30RINw`v!s-729+;^UjiJFH_-?1 zQ)xmZU%NPxzm&o1X2cYiai9ieoqP=9bBo=W=7zBJ`>znMLZ_GRDGf8v4sv1jdCDDD z6a4xEoKfSh!xktQHjpdr$uJG!+^~LKI`Ct*4K89V<6n86mkS2#$8!WJS9?Xg=SOlE zTa)ZSAlKdLg8(gH({<#cJS9*!uq^rGu2ov!s0k zmE7K`A6cu<&W{d^_E#6BygF2G?&$NrO`cCiHwaq&<|Z|GjzqUl(az@wf1E5OT4#-6 zYwlJvR6lyVpBU8pwrhJ%$-)~fi~+sZDXUi>Eq==ud=#C|ce65G%473E(PksCzYH2{ zZJln1LU%}oCwRW)YLJi(%&n;weu*Pod=MQ)I%LR$%fD7`H(2BHrBJU>+gUeqKKZ0o z*e2TY;6&qWGWwdI{3@=>J7`g$>hbl2+zwCD6AX2sOPaHSl%U0XW7Q%Nr9WfIAx5^+ zD`YM3JB6dbQ?UX>ZUNL?H&l&389UZ=;VL3^C(y$K+fWYAWnF>S3pwAkwz1(q+7Wsk zKRjTt+YZ%Vr`8`dVJtT#-qGZML^7=SR`Y`T?yA`*-B;D1+G_14{kGW49&5h;gYK=C zjkW{J*bA4A?+9KBia%r?NUkk}2p|RD2sQ2#R!?cM@HU*IHJ~yM^k+HM7E zrAM}^k7w^gLw!s#(6ZYjF=DsK=`>E3At|yU_#LPC37YNlEtoer2Je*B@Nak(KH^_x7d_t537! zLE@rN#8p_{#e8&v;Xs&NPwv{d`icGJZ5!m<+eAt?_sN(j38xjTb*F7GQv_L$O!WuC zB16(%K6;{Xhkki~%lz97%qsXs`hlvuU{T4;>_=Xyi&nDbQg(+Z=qoo79O{)jN%6JF zF($@{(f1e(+%g4Nsk2WYQ9YV%L?icaR}ricV{#Ezz7nrRa>q$v^vXsEmneyUXwbgh zUO;kos1D&9g{|Y-KdZ{@6i%r3%Wl--~hK6MXmq5W;6fz+@&17KvFnYQcE~OWbyF%1j6{Kuzh^U zM~h6Ub;|>)d~RRN57_jmrE3IMtc1o>ZIKssThhIAxRW+*{Mr~$!V1IGk{ z^`0`XO@17cKKZSl2d?_+*KCxTjW6AMP9{24x{8#?!s5hTO;X7!F41R>7c$*x~8AOCnK(}xpztE+r z=@&ICS^T6?UsFJq>hA4xCo?+STF9yY8XovVn=Oqo{gxFxZ5YQO-r2jA z%@hAL@kck3v5K$yCVsPPtX*0pOAF-(jkfKHi9>vz!!#;ql0*^CiL@kunzF~aKIq`ZquS6b$B99cgO0az34dL(J8ukMEf#rNHT52X`J8jD0^YaBc zT5z=I3=yAxJbTs5r0TwaS^VjuGbHA!smGv#P7S6;6ODj0RRSs*BV}{9T>rD*NfQM{ zPscsM>JV00!QxcWeXFL32740C|14uPI5gxR`#Y@I+t(*0wd+9ZUukZ#Q4sdsmOEm)Hs=hhCrBU|K%2J9VyrETF<%7DT|w9?rW{%MP!=%%A&-s=DgmRULwzYtK<1 zbN>xlyhB1XPt}nFjI(_dlte;QnB#_?RyfW{h_i%>3=jBZ@vl9@sG0YN&LL-Sa9(03 zQ3n5~ZVaqX<6_Z?uJ@*TcXag4`aoIFaLjDlK-o44Zk%3 z1-=d8FC!Hj8#ZvJfj&x%$vrjo0Wit`{*770b8r$BBO?OjsU2^KC)o1T#}<|BQOlzC zkU}qB1W;DqEPsA~A2}3@uiGyP;9Th{WoAB{t(SV+%yY8Nt>o07< zK>^kXN}_5QOfYXk6JY{%MNG_v!o7)jgTa3lm;r)D+z-JFRI?l%4ViJ;RK18fwFfo( z46Jzp{c}+derg_KhA(H}fhCruRO)h|o22zp-}1Nk5YL38@ZZbg$Kr;e6pP^Dl{vH0 z$*1o-gNdUAhoTtdS0f0&EC2`DIwB8m#S*PS;d2A7=Up<51&1hqU&HAC>s~|wm-(N! 
zihx#b?&>;JJXN(?8%3L|U7>`O7V}LEcR?Ro<%n7+Q~thI{zdS~dMfnqLJHN8M80KN zOaJh|^XF%4fa?kgJouzmnH*%C24J@`$ai=qT$@5LDu;Z$yk>cJQ1zFCK7dwUk6#qV z9PS-qR@>(`<{>bLbYJ2vc*}NpNJwT?l~&!+ezsV0wF<6OfA_+e}5X%Iw5j$Rn6PG zF3ZnUwQTvjfw;IoEz@QEw9e>p2<@Q70-ZFF+hXms{2G$7isu9-SNyfDI^J%u(zVwF)uzVBSRAUf<)57FGWG=^zx&vElLaIX#X&&I4RI8Tnmf}Us3&$ zL&{Kz{@z~0=32=8Co8lr!9;`X?`ylM&b*0%zz0p7hfs}N`46`d6QBzX#GI@7yuyq3hl^LV8eE1y3T4} zZRBg>M9SV^sr?%CN7QD|Wx-jVtM#x-ypV`CeGw0E!)%()-q19SFPA(-32J?WZ ze|4vQ*;$_788!<4wEyGw+eU>g<43EGL!T#)j*j9!JGB=ULmw2b+2N6$BXLoemj?3|oe?K3??L(!F$l`uz!E`KmPMKb(;r1R=TuB%{P zL&F^qxzE#fdmh3d;dD{b%BP$ww4ped^=HT3ve8^135v2Ym4Zc`fX+#M(W}QVRLgQo6IjvGD+V@a;1<$SVCQY3=9rogd*LHhy9GB|3 zG;ql;UjZEh>(-&Rr=0Z)3WdOlksbBgn|XVtbT+HTdLH1Qb=GtGSMS|;+4>6C!qk^* z(sCLm;y~Mz*qEf}cN=qrEW(39>o8t^=hz8aQ#rMi@9n$H{CxP${gb;tS5c^wya*Qn zm@e}i7TcBDtWb~-PbEwa1YQV>GBh4+$giS)eLszp zhmz0!E^-E;X?c$S>Rh0*^UJTS^%dhQ)%O(Vm|;v*Z0X7eV~xi}k{dPD7PhtuZ;Zbz zZoM0hQr2Oc47pczk#qD`R1S37tqhuW9| z15pXyLqk61lumdBOg}0HPp;@_S%q!mcZ7h#&jOVs{PsbSAxgz|yoA8feaG2a^i7A&;UWmf!mYYLakJ@zB91dbs7y9Y{G>yM4rGqdRfM;pYT?VN%2&~MLZ*25G=W@UF1rz&Nslu|%bk+0Br zzQnx$L-8`8e0|V!n4Y2zM#}(EG}Q`7Xxan8|J|wj$H(e2HG#W%!>qBs$^;Zfn@b~(nF5}_;71W>@Bd{o{5>F9D3tU)xx0B{`hNcp*I8|U diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst deleted file mode 100644 index 8e074779..00000000 --- a/doc/source/configuration.rst +++ /dev/null @@ -1,185 +0,0 @@ -.. - Copyright 2012 New Dream Network, LLC (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -======================= - Configuration Options -======================= - -For the list and description of configuration options that can be set for Ceilometer in -order to set up the services please see the -`Telemetry section `_ -in the OpenStack Manuals Configuration Reference. - -Sample Configuration file -========================= - -The sample configuration file for Ceilometer, named -etc/ceilometer/ceilometer.conf.sample, was removed from version control after -the Icehouse release. For more details, please read the file -etc/ceilometer/README-ceilometer.conf.txt. You can generate this sample -configuration file by running ``tox -e genconfig``. - -.. _Pipeline-Configuration: - -Pipelines -========= - -Pipelines describe a coupling between sources of samples and the -corresponding sinks for transformation and publication of the samples. - -A source is a producer of samples, in effect a set of pollsters and/or -notification handlers emitting samples for a set of matching meters. -See :doc:`plugins` for details on how to write and plug in your plugins. - -Each source configuration encapsulates meter name matching, polling -interval determination, optional resource enumeration or discovery, -and mapping to one or more sinks for publication. - -A sink on the other hand is a consumer of samples, providing logic for -the transformation and publication of samples emitted from related sources. -Each sink configuration is concerned `only` with the transformation rules -and publication conduits for samples. 
In effect, a sink describes a chain of handlers. The chain starts with
zero or more transformers and ends with one or more publishers. The first
transformer in the chain is passed samples from the corresponding source,
takes some action such as deriving rate of change, performing unit conversion,
or aggregating, before passing the modified sample to the next step.

The chain ends with one or more publishers. This component makes it possible
to persist the data into storage through the message bus or to send it to one
or more external consumers. One chain can contain multiple publishers; see the
:ref:`multi-publisher` section.


Pipeline configuration
----------------------

Pipeline configuration is, by default, stored in a separate configuration
file, called pipeline.yaml, next to the ceilometer.conf file. The pipeline
configuration file can be set via the *pipeline_cfg_file* parameter in
ceilometer.conf. Multiple chains can be defined in one configuration file.

The chain definition looks like the following::

    ---
    sources:
        - name: 'source name'
          interval: 'how often the samples should be injected into the pipeline'
          meters:
              - 'meter filter'
          resources:
              - 'list of resource URLs'
          discovery:
              - 'list of discoverers'
          sinks:
              - 'sink name'
    sinks:
        - name: 'sink name'
          transformers: 'definition of transformers'
          publishers:
              - 'list of publishers'

The *name* parameter of a source is unrelated to anything else;
nothing references a source by name, and a source's name does not have
to match anything.

The *interval* parameter in the sources section should be defined in seconds.
It determines the cadence of sample injection into the pipeline, where samples
are produced under the direct control of an agent, i.e. via a polling cycle,
as opposed to incoming notifications.

There are several ways to define the list of meters for a pipeline source.
The list of valid meters can be found in the :ref:`measurements` section.
A source can operate on all meters, or on an explicit list of included or
excluded meters:

* To include all meters, use the '*' wildcard symbol.
* To define the list of meters, use either of the following:

  * To define the list of included meters, use the 'meter_name' syntax.
  * To define the list of excluded meters, use the '!meter_name' syntax.
  * For meters which identify a complex Sample field, use the wildcard
    symbol to select all, e.g. for "disk.read.bytes", use "disk.\*".

The above definition methods can be used in the following combinations:

* Only the wildcard symbol
* The list of included meters
* The list of excluded meters
* The wildcard symbol with the list of excluded meters

.. note::
   At least one of the above variations should be included in the meters
   section. Included and excluded meters cannot co-exist in the same
   pipeline. Wildcard and included meters cannot co-exist in the same
   pipeline definition section.

A given polling plugin is invoked according to each source section
whose *meters* parameter matches the plugin's meter name. That is,
the matching source sections are combined by union, not intersection,
of the prescribed time series.

The optional *resources* section of a pipeline source allows a list of
static resource URLs to be configured. An amalgamated list of all
statically configured resources for a set of pipeline sources with a
common interval is passed to the individual pollsters matching those pipelines.
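As an illustration, a source polling two statically configured SNMP hosts
might look like the following (a sketch only; the host URLs, community
string and meter filter are illustrative, not part of any shipped
configuration)::

    sources:
        - name: snmp_source
          interval: 60
          meters:
              - "hardware.cpu*"
          resources:
              - snmp://public@10.0.0.1
              - snmp://public@10.0.0.2
          sinks:
              - meter_sink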
The optional *discovery* section of a pipeline source contains the list of
discoverers. These discoverers can be used to dynamically discover the
resources to be polled by the pollsters defined in this pipeline. The names
of the discoverers should be the same as the related names of the plugins in
setup.cfg.

If the *resources* or *discovery* section is not set, it defaults to an
empty list. If both *resources* and *discovery* are set, the final resources
passed to the pollsters will be the combination of the dynamic resources
returned by the discoverers and the static resources defined in the
*resources* section. Any duplicates between the resources returned by the
discoverers and those defined in the *resources* section are removed before
the resources are passed to the pollsters.

There are three ways a pollster can get a list of resources to poll, listed
below in descending order of precedence:

 1. From the per-pipeline configured discovery and/or static resources.
 2. From the per-pollster default discovery.
 3. From the per-agent default discovery.

The *transformers* section of a pipeline sink provides the possibility to add
a list of transformer definitions. The names of the transformers should be
the same as the names of the related extensions in setup.cfg. For a more
detailed description, please see the `transformers`_ section of the
Administrator Guide of Ceilometer.

.. _transformers: http://docs.openstack.org/admin-guide/telemetry-data-collection.html#transformers

The *publishers* section contains the list of publishers, where the sample
data should be sent after the possible transformations. The names of the
publishers should be the same as the related names of the plugins in
setup.cfg.

The default configuration can be found in `pipeline.yaml`_.

.. _pipeline.yaml: https://git.openstack.org/cgit/openstack/ceilometer/tree/etc/ceilometer/pipeline.yaml
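Putting these pieces together, a complete chain modelled on the cpu entries
shipped in the default pipeline.yaml might look like this (a sketch; the
transformer parameters shown are an example, not a prescription)::

    ---
    sources:
        - name: cpu_source
          interval: 600
          meters:
              - "cpu"
          sinks:
              - cpu_sink
    sinks:
        - name: cpu_sink
          transformers:
              - name: "rate_of_change"
                parameters:
                    target:
                        name: "cpu_util"
                        unit: "%"
                        type: "gauge"
                        scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
          publishers:
              - notifier://

Here samples for the ``cpu`` meter are polled every 600 seconds, converted
to a ``cpu_util`` percentage by the rate-of-change transformer, and handed
to the notifier publisher.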
Publishers
++++++++++

For more information about publishers see the `publishers`_ section of the
Administrator Guide of Ceilometer.

.. _publishers: http://docs.openstack.org/admin-guide/telemetry-data-retrieval.html#publishers

diff --git a/doc/source/events.rst b/doc/source/events.rst
deleted file mode 100644
index 9091d5ed..00000000
--- a/doc/source/events.rst
+++ /dev/null
@@ -1,291 +0,0 @@
..
   Copyright 2013 Rackspace Hosting.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _events:

=============================
 Events and Event Processing
=============================

Events vs. Samples
==================

In addition to Meters, and related Sample data, Ceilometer can also process
Events. While a Sample represents a single numeric datapoint, driving a Meter
that represents the changes in that value over time, an Event represents the
state of an object in an OpenStack service (such as an Instance in Nova, or
an Image in Glance) at a point in time when something of interest has
occurred. This can include non-numeric data, such as an instance's flavor or
network address.

In general, Events let you know when something has changed about an object in
an OpenStack system, such as the resize of an instance or the creation of an
image.

While Samples can be relatively cheap (small), disposable (losing an
individual sample datapoint won't matter much), and fast, Events are larger,
more informative, and should be handled more consistently (you do not want to
lose one).

Event Structure
===============

To facilitate downstream processing (billing and/or aggregation), a
:doc:`minimum required data set and format ` has been defined for services;
however, events generally contain the following information:

event_type
    A dotted string defining what event occurred, such as
    "compute.instance.resize.start".

message_id
    A UUID for this event.

generated
    A timestamp of when the event occurred on the source system.

traits
    A flat mapping of key-value pairs. The event's Traits contain most of the
    details of the event. Traits are typed, and can be strings, ints, floats,
    or datetimes.

raw
    (Optional) Mainly for auditing purposes, the full notification message
    can be stored (unindexed) for future evaluation.

Events from Notifications
=========================

Events are primarily created via the notifications system in OpenStack.
OpenStack systems, such as Nova, Glance, Neutron, etc. will emit
notifications in a JSON format to the message queue when some notable action
is taken by that system. Ceilometer will consume such notifications from the
message queue and process them.

The general philosophy of notifications in OpenStack is to emit any and all
data someone might need, and let the consumer filter out what they are not
interested in. In order to make processing simpler and more efficient, the
notifications are stored and processed within Ceilometer as Events. The
notification payload, which can be an arbitrarily complex JSON data
structure, is converted to a flat set of key-value pairs known as Traits.
This conversion is specified by a config file, so that only the specific
fields within the notification that are actually needed for processing the
event have to be stored as Traits.

Note that the Event format is meant for efficient processing and querying;
there are other means available for archiving notifications (e.g. for audit
purposes), possibly to different datastores.

Converting Notifications to Events
----------------------------------

In order to make it easier to allow users to extract what they need, the
conversion from Notifications to Events is driven by a configuration file
(specified by the flag definitions_cfg_file_ in ceilometer.conf).

This includes descriptions of how to map fields in the notification body to
Traits, and optional plugins for doing any programmatic translations
(splitting a string, forcing case, etc.).

The mapping of notifications to events is defined per event_type, which can
be wildcarded. Traits are added to events if the corresponding fields in the
notification exist and are non-null. (As a special case, an empty string is
considered null for non-text traits. This is due to some OpenStack projects
(mostly Nova) using an empty string for null dates.)

If the definitions file is not present, a warning will be logged, but an
empty set of definitions will be assumed.
By default, any notifications that do not have a corresponding event
definition in the definitions file will be converted to events with a set of
minimal, default traits. This can be changed by setting the flag
drop_unmatched_notifications_ in the ceilometer.conf file. If this is set to
True, then any notifications that don't have events defined for them in the
file will be dropped. This may be what you want: the notification system is
quite chatty by design (the notifications philosophy is "tell us everything,
we'll ignore what we don't need"), so you may want to ignore the noisier ones
if you don't use them.

.. _definitions_cfg_file: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html
.. _drop_unmatched_notifications: http://docs.openstack.org/trunk/config-reference/content/ch_configuring-openstack-telemetry.html

There is a set of default traits (all are TEXT type) that will be added to
all events if the notification has the relevant data:

* service: (All notifications should have this) the notification's publisher
* tenant_id
* request_id
* project_id
* user_id

These do not have to be specified in the event definition; they are
automatically added, but their definitions can be overridden for a given
event_type.

Definitions file format
-----------------------

The event definitions file is in YAML format. It consists of a list of event
definitions, which are mappings. Order is significant: the list of
definitions is scanned in *reverse* order (from the last definition in the
file to the first) to find a definition which matches the notification's
event_type. That definition will be used to generate the Event. The reverse
ordering is done because it is common to want a more general wildcarded
definition (such as "compute.instance.*") with a set of traits common to all
of those events, followed by a few more specific event definitions (like
"compute.instance.exists") that have all of the above traits, plus a few
more. This lets you put the general definition first, followed by the
specific ones, and use YAML mapping include syntax to avoid copying all of
the trait definitions.

Event Definitions
-----------------

Each event definition is a mapping with two keys (both required):

event_type
    This is a list (or a string, which will be taken as a one-element list)
    of event_types this definition will handle. These can be wildcarded with
    unix shell glob syntax. An exclusion listing (starting with a '!') will
    exclude any types listed from matching. If ONLY exclusions are listed,
    the definition will match anything not matching the exclusions.

traits
    This is a mapping; the keys are the trait names, and the values are
    trait definitions.
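For example, a definition intended to catch all compute events except
deletions could combine a wildcard with an exclusion (a sketch, not one of
the shipped definitions; the trait shown is illustrative)::

    - event_type:
        - 'compute.instance.*'
        - '!compute.instance.delete.*'
      traits:
        instance_id:
          fields: payload.instance_id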
Trait Definitions
-----------------

Each trait definition is a mapping with the following keys:

type
    (optional) The data type for this trait (as a string). Valid options
    are: *text*, *int*, *float*, and *datetime*. Defaults to *text* if not
    specified.

fields
    A path specification for the field(s) in the notification you wish to
    extract for this trait. Specifications can be written to match multiple
    possible fields; the value for the trait will be derived from the
    matching fields that exist and have non-null values in the notification.
    By default the value will be the first such field (plugins can alter
    that, if they wish). This is normally a string, but, for convenience, it
    can be specified as a list of specifications, which will match the
    fields for all of them. (See `Field Path Specifications`_ for more info
    on this syntax.)

plugin
    (optional) This is a mapping (for convenience, this value can also be
    specified as a string, which is interpreted as the name of a plugin to
    be loaded with no parameters) with the following keys:

    name
        (string) The name of a plugin to load.

    parameters
        (optional) Mapping of keyword arguments to pass to the plugin on
        initialization. (See the documentation on each plugin to see what
        arguments it accepts.)

Field Path Specifications
-------------------------

The path specifications define which fields in the JSON notification body
are extracted to provide the value for a given trait. The paths can be
specified with a dot syntax (e.g. "payload.host"). Square bracket syntax
(e.g. "payload[host]") is also supported. In either case, if the key for the
field you are looking for contains special characters, like '.', it will
need to be quoted (with double or single quotes), like so:

    payload.image_meta.'org.openstack__1__architecture'

The syntax used for the field specification is a variant of JSONPath, and is
fairly flexible. (See https://github.com/kennknowles/python-jsonpath-rw for
more info.)

Example Definitions file
------------------------

::

    ---
    - event_type: compute.instance.*
      traits: &instance_traits
        user_id:
          fields: payload.user_id
        instance_id:
          fields: payload.instance_id
        host:
          fields: publisher_id
          plugin:
            name: split
            parameters:
              segment: 1
              max_split: 1
        service_name:
          fields: publisher_id
          plugin: split
        instance_type_id:
          type: int
          fields: payload.instance_type_id
        os_architecture:
          fields: payload.image_meta.'org.openstack__1__architecture'
        launched_at:
          type: datetime
          fields: payload.launched_at
        deleted_at:
          type: datetime
          fields: payload.deleted_at
    - event_type:
        - compute.instance.exists
        - compute.instance.update
      traits:
        <<: *instance_traits
        audit_period_beginning:
          type: datetime
          fields: payload.audit_period_beginning
        audit_period_ending:
          type: datetime
          fields: payload.audit_period_ending

Trait plugins
-------------

Trait plugins can be used to do simple programmatic conversions on the value
in a notification field, like splitting a string, lowercasing a value,
converting a screwball date into ISO format, or the like. They are
initialized with the parameters from the trait definition, if any, which can
customize their behavior for a given trait. They are called with a list of
all matching fields from the notification, so they can derive a value from
multiple fields. The plugin will be called even if there are no fields found
matching the field path(s); this lets a plugin set a default value, if
needed. A plugin can also reject a value by returning *None*, which will
cause the trait not to be added. If the plugin returns anything other than
*None*, the trait's value will be set to whatever the plugin returned
(coerced to the appropriate type for the trait).
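As a rough sketch of what such a plugin looks like (assuming the
``TraitPluginBase`` interface from ``ceilometer.event.trait_plugins``; the
class name and the ``default`` parameter here are hypothetical)::

    from ceilometer.event import trait_plugins


    class LowercaseTraitPlugin(trait_plugins.TraitPluginBase):
        """Hypothetical example: lowercase the first matching field value."""

        def __init__(self, default=None, **kw):
            # Keyword arguments come from the 'parameters' mapping in the
            # trait definition, if one was given.
            super(LowercaseTraitPlugin, self).__init__(**kw)
            self.default = default

        def trait_value(self, match_list):
            # match_list holds (field_path, value) pairs for every field
            # matched by the trait's path specification(s). It may be empty,
            # which is the plugin's chance to supply a default.
            if not match_list:
                return self.default
            _path, value = match_list[0]
            if value is None:
                return self.default
            if not isinstance(value, str):
                # This illustrative plugin only handles string values;
                # returning None instead would drop the trait entirely.
                return value
            return value.lower()

To be usable from a trait definition, such a plugin would be registered
under the ``ceilometer.event.trait_plugin`` entry point namespace in
setup.cfg (an assumption based on how the shipped plugins, such as
``split``, are wired up).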
Building Notifications
======================

In general, the payload format OpenStack services emit could be described as
the Wild West. The payloads are often arbitrary data dumps at the time of the
event, and are often susceptible to change. To make consumption easier, the
Ceilometer team offers two proposals: CADF_, an open cloud standard which
helps model cloud events, and the PaaS Event Format.

.. toctree::
   :maxdepth: 1

   format

.. _CADF: http://docs.openstack.org/developer/pycadf/

diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
deleted file mode 100644
index 2787d5d4..00000000
--- a/doc/source/glossary.rst
+++ /dev/null
@@ -1,132 +0,0 @@
..
   Copyright 2012 New Dream Network (DreamHost)
   Copyright 2013 eNovance

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

==========
 Glossary
==========

.. glossary::

   agent
      Software service running on the OpenStack infrastructure measuring
      usage and sending the results to the :term:`collector`.

   API server
      HTTP REST API service for ceilometer.

   billing
      Billing is the process of assembling bill line items into a single
      per-customer bill, emitting the bill to start the payment collection.

   bus listener agent
      Bus listener agent which takes events generated on the Oslo
      notification bus and transforms them into Ceilometer samples. This is
      the preferred method of data collection.

   ceilometer
      From Wikipedia [#]_:

          A ceilometer is a device that uses a laser or other light source
          to determine the height of a cloud base.

   polling agent
      Software service running either on a central management node within
      the OpenStack infrastructure or on a compute node, measuring usage and
      sending the results to the :term:`collector`.

   collector
      Software service running on the OpenStack infrastructure monitoring
      notifications from other OpenStack components and samples from the
      ceilometer agent, and recording the results in the database.

   notification agent
      The different OpenStack services emit several notifications about the
      various types of events. The notification agent consumes them from the
      respective queues and filters them by event_type.

   data store
      Storage system for recording data collected by ceilometer.

   meter
      The measurements tracked for a resource. For example, an instance has
      a number of meters, such as duration of instance, CPU time used,
      number of disk I/O requests, etc. Three types of meters are defined in
      ceilometer:

      * Cumulative: Increasing over time (e.g. disk I/O)
      * Gauge: Discrete items (e.g. floating IPs, image uploads) and
        fluctuating values (e.g. number of Swift objects)
      * Delta: Incremental change to a counter over time (e.g. bandwidth
        delta)

   metering
      Metering is the process of collecting information about what, who,
      when and how much regarding anything that can be billed. The result of
      this is a collection of "tickets" (a.k.a. samples) which are ready to
      be processed in any way you want.

   notification
      A message sent via an external OpenStack system (e.g. Nova, Glance,
      etc.) using the Oslo notification mechanism [#]_. These notifications
      are usually sent to and received by Ceilometer through the notifier
      RPC driver.
   non-repudiable
      From Wikipedia [#]_:

          Non-repudiation refers to a state of affairs where the purported
          maker of a statement will not be able to successfully challenge
          the validity of the statement or contract. The term is often seen
          in a legal setting wherein the authenticity of a signature is
          being challenged. In such an instance, the authenticity is being
          "repudiated".

   project
      The OpenStack tenant or project.

   polling agents
      The polling agent collects measurements by polling some API or other
      tool at a regular interval.

   push agents
      The push agent is the only solution for fetching data from projects
      that do not expose the required data in a remotely usable way. This is
      not the preferred method, as it makes deployment more complex by
      adding a component to each of the nodes that need to be monitored.

   rating
      Rating is the process of analysing a series of tickets, according to
      business rules defined by marketing, in order to transform them into
      bill line items with a currency value.

   resource
      The OpenStack entity being metered (e.g. instance, volume, image,
      etc.).

   sample
      Data sample for a particular meter.

   source
      The origin of metering data. This field is set to "openstack" by
      default. It can be configured to a different value using the
      sample_source field in the ceilometer.conf file.

   user
      An OpenStack user.

.. [#] http://en.wikipedia.org/wiki/Ceilometer
.. [#] https://git.openstack.org/cgit/openstack/ceilometer/tree/ceilometer/openstack/common/notifier
.. [#] http://en.wikipedia.org/wiki/Non-repudiation

diff --git a/doc/source/gmr.rst b/doc/source/gmr.rst
index 2453b0c1..4e6bed92 100644
--- a/doc/source/gmr.rst
+++ b/doc/source/gmr.rst
@@ -27,10 +27,10 @@
 A *GMR* can be generated by sending the *USR1* signal to any Ceilometer
 process with support (see below). The *GMR* will then be output to standard
 error for that particular process.
 
-For example, suppose that ``ceilometer-polling`` has process id ``8675``, and
-was run with ``2>/var/log/ceilometer/ceilometer-polling.log``. Then,
+For example, suppose that ``ceilometer-api`` has process id ``8675``, and
+was run with ``2>/var/log/ceilometer/ceilometer-api.log``. Then,
 ``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to
-``/var/log/ceilometer/ceilometer-polling.log``.
+``/var/log/ceilometer/ceilometer-api.log``.
 
 Structure of a GMR
 ------------------

diff --git a/doc/source/index.rst b/doc/source/index.rst
index c3589ad0..6e48bed9 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -17,13 +17,7 @@
 Welcome to the Ceilometer developer documentation!
 ==================================================
 
-The :term:`Ceilometer` project is a data collection service that provides the
-ability to normalise and transform data across all current OpenStack core
-components with work underway to support future OpenStack components.
-
-Ceilometer is a component of the Telemetry project. Its data can be used to
-provide customer billing, resource tracking, and alarming capabilities
-across all OpenStack core components.
+Ceilometer is a component of the Telemetry project.
 
 This documentation offers information on how Ceilometer works and how to
 contribute to the project.
@@ -34,10 +28,6 @@
 Overview
 ========
 .. toctree::
    :maxdepth: 2
 
-   overview
-   architecture
-   measurements
-   events
    webapi/index
 
 Developer Documentation
@@ -47,9 +37,6 @@
    :maxdepth: 2
 
    install/index
-   configuration
-   plugins
-   new_meters
    testing
    contributing
    gmr
@@ -60,8 +47,6 @@
 Appendix
 
 .. toctree::
    :maxdepth: 1
 
-   releasenotes/index
-   glossary
    api/index

diff --git a/doc/source/install/custom.rst b/doc/source/install/custom.rst
deleted file mode 100644
index edb954db..00000000
--- a/doc/source/install/custom.rst
+++ /dev/null
@@ -1,165 +0,0 @@
..
   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _customizing_deployment:

===================================
 Customizing Ceilometer Deployment
===================================

Notifications queues
====================

.. index::
   double: customizing deployment; notifications queues; multiple topics

By default, Ceilometer consumes notifications on the messaging bus sent to
**topics** by using a queue/pool name that is identical to the topic name.
You shouldn't have different applications consuming messages from this
queue. If you want to also consume the topic notifications with a system
other than Ceilometer, you should configure a separate queue that listens
for the same messages.

Ceilometer allows multiple topics to be configured, so that the polling
agent can send the same notification messages to other queues. Notification
agents also use **topics** to configure which queues to listen to. If you
use multiple topics, you should configure the notification agent and polling
agent separately, otherwise Ceilometer collects duplicate samples.

By default, the ceilometer.conf file contains::

    [oslo_messaging_notifications]
    topics = notifications

To use multiple topics, you should give the ceilometer-agent-notification
and ceilometer-polling services different ceilometer.conf files. The
Ceilometer configuration file ceilometer.conf is normally located in the
/etc/ceilometer directory. Make changes according to your requirements,
which may look like the following.

For the notification agent, using ceilometer-notification.conf, settings
like::

    [oslo_messaging_notifications]
    topics = notifications,xxx

For the polling agent, using ceilometer-polling.conf, settings like::

    [oslo_messaging_notifications]
    topics = notifications,foo

.. note::

   The topics lists in ceilometer-notification.conf and
   ceilometer-polling.conf should have exactly one topic in common.

Doing this, it is easy to receive data from multiple internal and external
services.
Using multiple dispatchers
==========================

.. index::
   double: customizing deployment; multiple dispatchers

The Ceilometer collector allows multiple dispatchers to be configured so
that data can be easily sent to multiple internal and external systems.
Dispatchers are divided between ``event_dispatchers`` and
``meter_dispatchers``, which can each be provided with their own set of
receiving systems.

.. note::

   In Liberty and prior, the configuration option for all data was
   ``dispatcher``, but this was changed for the Mitaka release to break out
   separate destination systems by type of data.

By default, Ceilometer only saves event and meter data in a database. If you
want Ceilometer to send data to other systems, instead of or in addition to
the Ceilometer database, multiple dispatchers can be enabled by modifying
the Ceilometer configuration file.

Ceilometer currently ships multiple dispatchers: the ``database``, ``file``,
``http`` and ``gnocchi`` dispatchers. As the names imply, the database
dispatcher sends metering data to a database, the file dispatcher logs
meters into a file, the http dispatcher posts the meters to an HTTP target,
and the gnocchi dispatcher posts the meters to the Gnocchi_ backend. Each
dispatcher can have its own configuration parameters. Please see the
available configuration parameters at the beginning of each dispatcher file.

.. _Gnocchi: http://gnocchi.readthedocs.org/en/latest/basic.html

To check which dispatchers are available in your system, you can inspect the
Ceilometer egg entry_points.txt file; you should normally see text like the
following::

    [ceilometer.dispatcher]
    database = ceilometer.dispatcher.database:DatabaseDispatcher
    file = ceilometer.dispatcher.file:FileDispatcher
    http = ceilometer.dispatcher.http:HttpDispatcher
    gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher

To configure one or multiple dispatchers for Ceilometer, find the Ceilometer
configuration file ceilometer.conf, which is normally located in the
/etc/ceilometer directory, and make changes accordingly (your configuration
file can be in a different directory).

To use multiple dispatchers on a Ceilometer collector service, add multiple
dispatcher lines in the ceilometer.conf file like the following::

    [DEFAULT]
    meter_dispatchers=database
    meter_dispatchers=file

If no dispatcher is present, the database dispatcher is used as the default.
If in some cases, such as traffic tests, no dispatcher is needed, one can
configure the line without a dispatcher, like the following::

    event_dispatchers=

With the above configuration, no event dispatcher is used by the Ceilometer
collector service; all event data received by the Ceilometer collector will
be dropped.

For the Gnocchi dispatcher, the following configuration settings should be
added::

    [DEFAULT]
    meter_dispatchers = gnocchi

    [dispatcher_gnocchi]
    archive_policy = low

The value specified for ``archive_policy`` should correspond to the name of
an ``archive_policy`` configured within Gnocchi.

For a Gnocchi dispatcher backed by Swift storage, the following additional
configuration settings should be added::

    [dispatcher_gnocchi]
    filter_project = gnocchi_swift
    filter_service_activity = True

.. note::

   If the gnocchi dispatcher is enabled, Ceilometer API calls will return a
   410 with an empty result. The Gnocchi API should be used instead to
   access the data.

Efficient polling
=================

- There is an optional config option called
  ``shuffle_time_before_polling_task`` in ceilometer.conf. Enable it by
  setting an integer greater than zero to shuffle the polling time of
  agents. This will add some random jitter to the time of sending requests
  to Nova or other components, to avoid a large number of requests in a
  short time period.
- There is an option to stream samples to minimise latency (at the expense
  of load) by setting ``batch_polled_samples`` to ``False`` in
  ceilometer.conf. A combined sketch of both options follows.
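Combined, a minimal sketch of these two settings in ceilometer.conf (the
values shown are illustrative, not recommendations)::

    [DEFAULT]
    # Spread polling starts over up to 60 seconds of random jitter.
    shuffle_time_before_polling_task = 60
    # Send each sample as soon as it is polled instead of batching.
    batch_polled_samples = False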
diff --git a/doc/source/install/dbreco.rst b/doc/source/install/dbreco.rst
deleted file mode 100644
index 55be61dc..00000000
--- a/doc/source/install/dbreco.rst
+++ /dev/null
@@ -1,89 +0,0 @@
..
   Copyright 2013 Nicolas Barcet for eNovance

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _choosing_db_backend:

=============================
 Choosing a database backend
=============================

.. note::

   Ceilometer's native database capabilities are intended for
   post-processing and auditing purposes where responsiveness is not a
   requirement. They capture the full fidelity of each datapoint and thus
   are not designed for low-latency use cases. For more responsive use
   cases, it's recommended to store data in an alternative source such as
   Gnocchi_. Please see `Moving from Ceilometer to Gnocchi`_ for more
   information.

.. note::

   As of Liberty, alarming support, and subsequently its database, is
   handled by Aodh_.

.. _Aodh: http://docs.openstack.org/developer/aodh/

Selecting a database backend for Ceilometer should not be done lightly, for
numerous reasons:

1. Not all backend drivers are equally implemented and tested. To help you
   make your choice, the table below will give you some idea of the status
   of each of the drivers available in trunk. Note that we do welcome
   patches to improve the completeness and quality of drivers.

2. It may not be a good idea to use the same host as another database, as
   Ceilometer can generate a LOT OF WRITES. For this reason it is generally
   recommended, if the deployment is targeting going into production, to use
   a dedicated host, or at least a VM which can be migrated to another
   physical host if needed. The following spreadsheet can help you get an
   idea of the volumes that ceilometer can generate:
   `Google spreadsheet `_

3. If you are relying on this backend to bill customers, you will note that
   your capacity to generate revenue is very much linked to its reliability,
   which seems to be a factor dear to many managers.

The following table indicates the status of each database driver:

================== ============================= ===========================================
Driver             API querying                  API statistics
================== ============================= ===========================================
MongoDB            Yes                           Yes
MySQL              Yes                           Yes
PostgreSQL         Yes                           Yes
HBase              Yes                           Yes, except groupby & selectable aggregates
================== ============================= ===========================================


Moving from Ceilometer to Gnocchi
=================================

Gnocchi represents a fundamental change in how data is represented and
stored. Installation and configuration can be found in
:ref:`installing_manually`. Differences between the APIs can be found here_.

There currently exists no migration tool between the services. To transition
to Gnocchi, multiple dispatchers can be enabled in the Collector to capture
data in both the native Ceilometer database and Gnocchi. This will allow you
to test Gnocchi and transition to it fully when comfortable. The following
should be included in addition to the required configurations for each
backend::

    [DEFAULT]
    meter_dispatchers=database
    meter_dispatchers=gnocchi

.. _Gnocchi: http://gnocchi.xyz
.. _here: https://docs.google.com/presentation/d/1PefouoeMVd27p2OGDfNQpx18mY-Wk5l0P1Ke2Vt5LwA/edit?usp=sharing

diff --git a/doc/source/install/development.rst b/doc/source/install/development.rst
index 08a052d8..3886261f 100644
--- a/doc/source/install/development.rst
+++ b/doc/source/install/development.rst
@@ -18,15 +18,6 @@
  Installing development sandbox
 ===============================
 
-Ceilometer has several daemons. The basic ones are: the :term:`polling agent`
-running either on the Nova compute node(s) or on the central management
-node(s), and the :term:`collector` and :term:`notification agent` running on
-the cloud's management node(s).
-
-In a development environment created by devstack_, these services are
-typically running on the same server.
-
 Configuring devstack
 ====================
 
@@ -37,11 +28,7 @@
 2. Create a ``local.conf`` file as input to devstack.
 
-3. Ceilometer makes extensive use of the messaging bus, but has not
-   yet been tested with ZeroMQ. We recommend using Rabbit for
-   now. By default, RabbitMQ will be used by devstack.
-
-4. The ceilometer services are not enabled by default, so they must be
+3. The ceilometer services are not enabled by default, so they must be
    enabled in ``local.conf`` before running ``stack.sh``.
 
 This example ``local.conf`` file shows all of the settings required for
@@ -51,7 +38,4 @@
    # Enable the Ceilometer devstack plugin
    enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer.git
 
-   By default, all ceilometer services except for the ceilometer-ipmi agent
-   will be enabled
-
 .. _devstack: http://www.devstack.org/

diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst
index 447b1996..35fb7075 100644
--- a/doc/source/install/index.rst
+++ b/doc/source/install/index.rst
@@ -22,9 +22,6 @@
 .. toctree::
    :maxdepth: 2
 
-   dbreco
    development
    manual
-   custom
-   upgrade
    mod_wsgi

diff --git a/doc/source/install/manual.rst b/doc/source/install/manual.rst
index 8011a32a..821427ef 100644
--- a/doc/source/install/manual.rst
+++ b/doc/source/install/manual.rst
@@ -27,67 +27,6 @@
 Storage Backend Installation
 
 This step is a prerequisite for the collector and API services. You may use
 one of the listed database backends below to store Ceilometer data.
 
-Gnocchi
--------

1. Follow the `Gnocchi installation`_ instructions

2. Initialize Gnocchi for Ceilometer::

     gnocchi-upgrade --create-legacy-resource-types

   .. note::

      Prior to Gnocchi 2.1, Ceilometer resource types were included,
      therefore the --create-legacy-resource-types flag is not needed.
3. Edit `/etc/ceilometer/ceilometer.conf` for the collector service::

     [DEFAULT]
     meter_dispatchers = gnocchi
     event_dispatchers =

     [notification]
     store_events = False

     [dispatcher_gnocchi]
     filter_service_activity = False # Enable if using swift backend
     filter_project = # if using swift backend

     [service_credentials]
     auth_url = :5000
     region_name = RegionOne
     password = password
     username = ceilometer
     project_name = service
     project_domain_id = default
     user_domain_id = default
     auth_type = password

4. Copy gnocchi_resources.yaml to the config directory (e.g.
   /etc/ceilometer)

5. To minimize data requests, caching and batch processing should be
   enabled:

   1. Enable resource caching (oslo.cache_ should be installed)::

        [cache]
        backend_argument = redis_expiration_time:600
        backend_argument = db:0
        backend_argument = distributed_lock:True
        backend_argument = url:redis://localhost:6379
        backend = dogpile.cache.redis

   2. Enable batch processing::

        [collector]
        batch_size = 100
        batch_timeout = 5

6. Start the collector service

.. _oslo.cache: http://docs.openstack.org/developer/oslo.cache/opts.html

 MongoDB
 -------
 
@@ -113,253 +52,11 @@
 SQLalchemy-supported DBs
 
    [database]
    connection = mysql+pymysql://username:password@host/ceilometer?charset=utf8
 
-HBase
------

HBase backend is implemented to use the HBase Thrift interface; therefore it
is mandatory to have the HBase Thrift server installed and running. To start
the Thrift server, please run the following command::

    ${HBASE_HOME}/bin/hbase thrift start

The implementation uses `HappyBase`_, which is a wrapper library used to
interact with HBase via the Thrift protocol. You can verify the Thrift
connection by running a quick test from a client::

    import happybase

    conn = happybase.Connection(host=$hbase-thrift-server,
                                port=9090,
                                table_prefix=None,
                                table_prefix_separator='_')
    # This returns a list of the HBase tables in your HBase server.
    print(conn.tables())

.. note::

   HappyBase version 0.5 or greater is required. Additionally, version 0.7
   is not currently supported.

In the case of HBase, the required database tables (`project`, `user`,
`resource`, `meter`) should be created manually with an `f` column family
for each one.

To use HBase as the storage backend, change the 'database' section in
ceilometer.conf as follows::

    [database]
    connection = hbase://hbase-thrift-host:9090

It is possible to customize happybase's `table_prefix` and
`table_prefix_separator` via the query string. By default `table_prefix` is
not set and `table_prefix_separator` is '_'. When `table_prefix` is not
specified, `table_prefix_separator` is not taken into account. For example,
the resource table in the default case will be 'resource', while with
`table_prefix` set to 'ceilo' and `table_prefix_separator` set to '.', the
resulting table will be 'ceilo.resource'. For this second case, this is the
database connection configuration::

    [database]
    connection = hbase://hbase-thrift-host:9090?table_prefix=ceilo&table_prefix_separator=.

To ensure proper configuration, please add the following lines to the
`hbase-site.xml` configuration file::

    <property>
      <name>hbase.thrift.minWorkerThreads</name>
      <value>200</value>
    </property>

.. _`Gnocchi installation`: http://docs.openstack.org/developer/gnocchi/install.html
.. _HappyBase: http://happybase.readthedocs.org/en/latest/index.html#
.. _MongoDB: http://www.mongodb.org/
.. _pymongo: https://pypi.python.org/pypi/pymongo/

Installing the notification agent
=================================

.. index::
   double: installing; agent-notification

1. Clone the ceilometer git repository to the management server::

     $ cd /opt/stack
     $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

     $ cd ceilometer
     $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

     $ mkdir -p /etc/ceilometer
     $ cp etc/ceilometer/*.json /etc/ceilometer
     $ cp etc/ceilometer/*.yaml /etc/ceilometer
     $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

4. Edit ``/etc/ceilometer/ceilometer.conf``

   1. Configure messaging::

        [oslo_messaging_notifications]
        topics = notifications

        [oslo_messaging_rabbit]
        rabbit_userid = stackrabbit
        rabbit_password = openstack1
        rabbit_hosts = 10.0.2.15

   2. Set the ``telemetry_secret`` value.

      Set the ``telemetry_secret`` value to a large, random value. Use the
      same value in all ceilometer configuration files, on all nodes, so
      that messages passing between the nodes can be validated. This value
      can be left empty to disable message signing.

      .. note::

         Disabling signing will improve message handling performance.

   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

5. Start the notification daemon::

     $ ceilometer-agent-notification

   .. note::

      The default development configuration of the notification agent logs
      to stderr, so you may want to run this step using a screen session or
      other tool for maintaining a long-running program in the background.


Installing the collector
========================

.. index::
   double: installing; collector

.. _storage_backends:

1. Clone the ceilometer git repository to the management server::

     $ cd /opt/stack
     $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

     $ cd ceilometer
     $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

     $ mkdir -p /etc/ceilometer
     $ cp etc/ceilometer/*.json /etc/ceilometer
     $ cp etc/ceilometer/*.yaml /etc/ceilometer
     $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

4. Edit ``/etc/ceilometer/ceilometer.conf``

   1. Configure messaging::

        [oslo_messaging_notifications]
        topics = notifications

        [oslo_messaging_rabbit]
        rabbit_userid = stackrabbit
        rabbit_password = openstack1
        rabbit_hosts = 10.0.2.15

   2. Set the ``telemetry_secret`` value (if enabled for the notification
      agent).

   Refer to :doc:`/configuration` for details about any other options you
   might want to modify before starting the service.

5. Start the collector::

     $ ceilometer-collector

   .. note::

      The default development configuration of the collector logs to
      stderr, so you may want to run this step using a screen session or
      other tool for maintaining a long-running program in the background.
Installing the Polling Agent
============================

.. index::
   double: installing; agent

.. note::

   The polling agent needs to be able to talk to Keystone and any of the
   services being polled for updates. It also needs to run on your compute
   nodes to poll instances.

1. Clone the ceilometer git repository to the server::

     $ cd /opt/stack
     $ git clone https://git.openstack.org/openstack/ceilometer.git

2. As a user with ``root`` permissions or ``sudo`` privileges, run the
   ceilometer installer::

     $ cd ceilometer
     $ sudo python setup.py install

3. Copy the sample configuration files from the source tree to their final
   location::

     $ mkdir -p /etc/ceilometer
     $ cp etc/ceilometer/*.json /etc/ceilometer
     $ cp etc/ceilometer/*.yaml /etc/ceilometer
     $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf

4. Configure messaging by editing ``/etc/ceilometer/ceilometer.conf``::

     [oslo_messaging_notifications]
     topics = notifications

     [oslo_messaging_rabbit]
     rabbit_userid = stackrabbit
     rabbit_password = openstack1
     rabbit_hosts = 10.0.2.15

5. In order to retrieve object store statistics, ceilometer needs access to
   swift with the ``ResellerAdmin`` role. You should give this role to your
   ``os_username`` user for the tenant ``os_tenant_name``::

     $ openstack role create ResellerAdmin
     +-----------+----------------------------------+
     | Field     | Value                            |
     +-----------+----------------------------------+
     | domain_id | None                             |
     | id        | f5153dae801244e8bb4948f0a6fb73b7 |
     | name      | ResellerAdmin                    |
     +-----------+----------------------------------+

     $ openstack role add f5153dae801244e8bb4948f0a6fb73b7 \
                          --project $SERVICE_TENANT \
                          --user $CEILOMETER_USER

6. Start the agent::

     $ ceilometer-polling

7. By default, the polling agent polls the `compute` and `central`
   namespaces. You can specify which namespaces to poll in the
   `ceilometer.conf` configuration file or on the command line::

     $ ceilometer-polling --polling-namespaces central,ipmi


 Installing the API Server
 =========================
 
@@ -392,22 +89,12 @@
    $ cp etc/ceilometer/*.yaml /etc/ceilometer
    $ cp etc/ceilometer/ceilometer.conf.sample /etc/ceilometer/ceilometer.conf
 
-4. Configure messaging by editing ``/etc/ceilometer/ceilometer.conf``::
-
-     [oslo_messaging_notifications]
-     topics = notifications
-
-     [oslo_messaging_rabbit]
-     rabbit_userid = stackrabbit
-     rabbit_password = openstack1
-     rabbit_hosts = 10.0.2.15
-
-5. Create a service for ceilometer in keystone::
+4. Create a service for ceilometer in keystone::
 
      $ openstack service create metering --name=ceilometer \
                                          --description="Ceilometer Service"
 
-6. Create an endpoint in keystone for ceilometer::
+5. Create an endpoint in keystone for ceilometer::
 
      $ openstack endpoint create $CEILOMETER_SERVICE \
                                  --region RegionOne \
@@ -422,7 +109,7 @@
    default port value for ceilometer API is 8777. If the port value has
    been customized, adjust accordingly.
 
-7. Choose and start the API server.
+6. Choose and start the API server.
 
    Ceilometer includes the ``ceilometer-api`` command. This can be used to
   run the API server. For smaller or proof-of-concept installations this
   is a reasonable choice. The development version of the API server logs
   to stderr, so you may want to run this step using a screen session or
   other tool for maintaining a long-running program in the background.
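Once the API server is running, a quick sanity check (a sketch: it assumes
the default port 8777 and a valid Keystone token exported in ``$TOKEN``) is
to ask it for its capabilities::

    $ curl -H "X-Auth-Token: $TOKEN" http://localhost:8777/v2/capabilities

A JSON document describing the storage capabilities should be returned.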
Enabling Service Notifications
==============================

Cinder
------

Edit ``cinder.conf`` to include::

    [oslo_messaging_notifications]
    driver = messagingv2

Glance
------

Edit ``glance.conf`` to include::

    [oslo_messaging_notifications]
    driver = messagingv2

Heat
----

Configure the driver in ``heat.conf``::

    [oslo_messaging_notifications]
    driver=messagingv2

Nova
----

Edit ``nova.conf`` to include::

    [DEFAULT]
    instance_usage_audit=True
    instance_usage_audit_period=hour
    notify_on_state_change=vm_and_task_state

    [oslo_messaging_notifications]
    driver=messagingv2


Sahara
------

Configure the driver in ``sahara.conf``::

    [DEFAULT]
    enable_notifications=true

    [oslo_messaging_notifications]
    driver=messagingv2


Swift
-----

Edit ``proxy-server.conf`` to include::

    [filter:ceilometer]
    topic = notifications
    driver = messaging
    url = rabbit://stackrabbit:openstack1@10.0.2.15:5672/
    control_exchange = swift
    paste.filter_factory = ceilometermiddleware.swift:filter_factory
    set log_level = WARN

and edit [pipeline:main] to include the ceilometer middleware before the
application::

    [pipeline:main]
    pipeline = catch_errors ... ... ceilometer proxy-server

Also, you need to configure the messaging-related options correctly, as
described above for the other parts of the installation guide. Refer to
:doc:`/configuration` for details about any other options you might want to
modify before starting the service.

diff --git a/doc/source/install/upgrade.rst b/doc/source/install/upgrade.rst
deleted file mode 100644
index 7994b9ee..00000000
--- a/doc/source/install/upgrade.rst
+++ /dev/null
@@ -1,114 +0,0 @@
..
   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _upgrade:

==========
 Upgrading
==========

Ceilometer's services support both full upgrades and partial (rolling)
upgrades. The required steps for each process are described below.


Full upgrades
=============

The following describes how to upgrade your entire Ceilometer environment
in one pass.

.. _full upgrade path:

1. Upgrade the database (if applicable)

   Run ceilometer-dbsync to upgrade the database if using one of
   Ceilometer's databases (see :ref:`choosing_db_backend`). The database
   does not need to be taken offline, as no data is modified or deleted.
   Ideally this should be done during a period of low activity. Best
   practices should still be followed (i.e. back up your data). If not
   using a Ceilometer database, you should consult the documentation of
   that storage beforehand.

2. Upgrade the collector service(s)

   Shut down all collector services. The new collector, which knows how to
   interpret the new payload, can then be started. It will disregard any
   historical attributes and can continue to process older data from the
   agents. You may restart as many new collectors as required.
3. Upgrade the notification agent(s)

   The notification agent can then be taken offline and upgraded with the
   same conditions as the collector service.

4. Upgrade the polling agent(s)

   In this path, you'll want to take down the agents on all hosts before
   starting. After starting the first agent, you should verify that data is
   again being polled. Additional agents can be added to support
   coordination if enabled.

.. note::

   The API service can be taken offline and upgraded at any point in the
   process (if applicable).


Partial upgrades
================

The following describes how to upgrade parts of your Ceilometer environment
gradually. The ultimate goal is to have all services upgraded to the new
version in time.

1. Upgrade the database (if applicable)

   Upgrading the database here is the same as in the `full upgrade path`_.

2. Upgrade the collector service(s)

   The new collector services can be started alongside the old collectors.
   Collectors old and new will disregard any new or historical attributes.

3. Upgrade the notification agent(s)

   The new notification agent can be started alongside the old agent if no
   workload_partitioning is enabled OR if it has the same pipeline
   configuration. If the pipeline configuration is changed, the old agents
   must be loaded with the same pipeline configuration first, to ensure the
   notification agents all work against the same pipeline sets.

4. Upgrade the polling agent(s)

   The new polling agent can be started alongside the old agent only if no
   new pollsters were added. If not, the new polling agents must start only
   in their own partitioning group and poll only the new pollsters. After
   all old agents are upgraded, the polling agents can be changed to poll
   both the new pollsters AND the old ones.

5. Upgrade the API service(s)

   API management is handled by WSGI, so there is only ever one version of
   the API service running.

.. note::

   Upgrade ordering does not matter in the partial upgrade path. The only
   requirement is that the database be upgraded first. It is advisable to
   upgrade following the same ordering as currently described: database,
   collector, notification agent, polling agent, API.


Developer notes
===============

When updating data models in the database or IPC, we need to adhere to a
single mantra: 'always add, never delete or modify.'

diff --git a/doc/source/measurements.rst b/doc/source/measurements.rst
deleted file mode 100644
index f61cf4c4..00000000
--- a/doc/source/measurements.rst
+++ /dev/null
@@ -1,35 +0,0 @@
..
   Copyright 2012 New Dream Network (DreamHost)

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _measurements:

==============
 Measurements
==============

Existing meters
===============

For the list of existing meters, see the tables under the
`Measurements page`_ of Ceilometer in the Administrator Guide.
.. _Measurements page: http://docs.openstack.org/admin-guide/telemetry-measurements.html

Adding new meters
=================

If you would like to add new meters, please check the
:ref:`add_new_meters` page in the Contributing section.

diff --git a/doc/source/new_meters.rst b/doc/source/new_meters.rst
deleted file mode 100644
index aed02a69..00000000
--- a/doc/source/new_meters.rst
+++ /dev/null
@@ -1,115 +0,0 @@
..
   Copyright 2012 New Dream Network (DreamHost)

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _add_new_meters:

==================
 New measurements
==================

Ceilometer is designed to collect measurements from OpenStack services and
from other external components. If you would like to add new meters to the
currently existing ones, you need to follow the guidelines given in this
section.

.. _meter_types:

Types
=====

Three types of meters are defined in Ceilometer:

.. index::
   double: meter; cumulative
   double: meter; gauge
   double: meter; delta

========== ==============================================================================
Type       Definition
========== ==============================================================================
Cumulative Increasing over time (instance hours)
Gauge      Discrete items (floating IPs, image uploads) and fluctuating values (disk I/O)
Delta      Changing over time (bandwidth)
========== ==============================================================================

When you are about to add a new meter, choose the applicable type from the
list above.


Units
=====

1. Whenever a volume is to be measured, SI-approved units and their approved
   symbols or abbreviations should be used. Information units should be
   expressed in bits ('b') or bytes ('B').
2. For a given meter, the units should NEVER, EVER be changed.
3. When the measurement does not represent a volume, the unit description
   should always describe WHAT is measured (i.e. apples, disks, routers,
   floating IPs, etc.).
4. When creating a new meter, if another meter exists measuring something
   similar, the same units and precision should be used.
5. Meters and samples should always document their units in Ceilometer (API
   and Documentation), and new sampling code should not be merged without
   the appropriate documentation.

============ ======== ============== =======================
Dimension    Unit     Abbreviations  Note
============ ======== ============== =======================
None         N/A                     Dimension-less variable
Volume       byte     B
Time         seconds  s
============ ======== ============== =======================


Meters
======

Naming convention
-----------------

If you plan on adding meters, please follow the conventions below:

1. Always use '.' as a separator and go from the least to the most
   discriminant word. For example, do not use ephemeral_disk_size but
   disk.ephemeral.size.

2. When a part of the name is a variable, it should always be at the end
   and start with a ':'.
- For example, do not use <type>.image but image:<type>, where type is your variable name. - -3. If you have any hesitation, come and ask in #openstack-ceilometer - -Meter definitions ------------------ -Meter definitions are stored, by default, in a separate configuration -file called :file:`ceilometer/meter/data/meter.yaml`. This is essentially -a replacement for the prior approach of writing notification handlers to consume -specific topics. - -A detailed description of how to use meter definitions is illustrated in -the `admin_guide`_. - -.. _admin_guide: http://docs.openstack.org/admin-guide/telemetry-data-collection.html#meter-definitions - -Non-metric meters and events ----------------------------- - -Ceilometer supports collecting notifications as events. It is highly -recommended to use events to capture whether something happened in the system, -as opposed to defining meters whose volume will be constantly '1'. -Events enable better representation and querying of metadata than the -statistical aggregations required for Samples. When event support is -turned on for Ceilometer, event type meters are collected into the event -database too, which can lead to the duplication of a huge amount of data. - -In order to learn more about events see the :ref:`events` section. diff --git a/doc/source/overview.rst b/doc/source/overview.rst deleted file mode 100644 index 602522ea..00000000 --- a/doc/source/overview.rst +++ /dev/null @@ -1,49 +0,0 @@ -======== -Overview -======== - -Objectives -========== - -The Ceilometer project was started in 2012 with one simple goal in mind: to -provide an infrastructure to collect any information needed regarding -OpenStack projects. It was designed so that rating engines could use this -single source to transform events into billable items, which we -label as "metering". - -As the project started to come to life, collecting an -`increasing number of meters`_ across multiple projects, the OpenStack -community started to realize that a secondary goal could be added to -Ceilometer: become a standard way to collect meters, regardless of the -purpose of the collection. For example, Ceilometer can now publish information -for monitoring, debugging and graphing tools in addition or in parallel to the -metering backend. We labelled this effort as "multi-publisher". - -.. _increasing number of meters: http://docs.openstack.org/developer/ceilometer/measurements.html - -Metering -======== - -If you divide the billing process into three steps, as is commonly done in -the telco industry, the steps are: - -1. :term:`Metering` -2. :term:`Rating` -3. :term:`Billing` - -Ceilometer's initial goal was, and still is, strictly limited to step -one. This is a choice made from the beginning not to go into rating or billing, -as the variety of possibilities seemed too large for the project to ever -deliver a solution that would fit everyone's needs, from private to public -clouds. This means that if you are looking at this project to solve your -billing needs, this is the right way to go, but certainly not the end of the -road for you. Once Ceilometer is in place on your OpenStack deployment, you -will still have several things to do before you can produce a bill for your -customers. One of your first tasks could be finding the right queries within the -Ceilometer API to extract the information you need for your very own rating -engine. - -..
seealso:: - - * http://wiki.openstack.org/EfficientMetering/ArchitectureProposalV1 - * http://wiki.openstack.org/EfficientMetering#Architecture diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst deleted file mode 100644 index ea744f27..00000000 --- a/doc/source/plugins.rst +++ /dev/null @@ -1,177 +0,0 @@ -.. - Copyright 2012 Nicolas Barcet for Canonical - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _plugins-and-containers: - -======================= - Writing Agent Plugins -======================= - -This documentation gives you some clues on how to write a new agent or -plugin for Ceilometer if you wish to instrument a measurement which -has not yet been covered by an existing plugin. - -Agents -====== - -The polling agent might run either on central cloud management nodes or on -compute nodes (where direct hypervisor polling is quite logical). - -The agent running on each compute node polls for compute resource -usage. Each meter collected is tagged with the resource ID (such as -an instance) and the owner, including tenant and user IDs. The meters -are then reported to the collector via the message bus. More detailed -information follows. - -The agent running on the cloud central management node polls other types of -resources from a management server (usually using OpenStack service APIs to -collect this data). - -The polling agent is implemented in ``ceilometer/agent/manager.py``. As -you will see in the manager, the agent loads all plugins defined in -the namespace ``ceilometer.poll.agent``, then periodically calls their -:func:`get_samples` method. - -Plugins -======= - -A polling agent can support multiple plugins to retrieve different -information and send it to the collector. As stated above, an agent -will automatically activate all possible plugins if no additional information -about what to poll was passed. Previously, we had separate compute and -central agents, with plugins (pollsters) defined in different namespaces. -We currently keep separate namespaces - ``ceilometer.poll.compute`` -and ``ceilometer.poll.central`` - to quickly separate what to poll depending -on where the polling agent is running. This will load, among others, the -:class:`ceilometer.compute.pollsters.cpu.CPUPollster`, which is defined in -the folder ``ceilometer/compute/pollsters``. - -The notifications mechanism uses plugins as well, for instance the -:class:`ceilometer.telemetry.notifications.TelemetryApiPost` plugin, -which is defined in the ``ceilometer/telemetry/notifications`` folder, though -in most cases this is not needed. A meter definition can be directly added -to :file:`ceilometer/meter/data/meter.yaml` to match the event type. For -more information, see the :ref:`add_new_meters` page.
- -We use these two existing plugins as examples: the first provides -an example of how to interact when you need to retrieve information from an -external system (pollster), and the second is an example of how to forward -an existing event notification on the standard OpenStack queue to ceilometer. - -Pollster -------- - -Compute plugins are defined as subclasses of the -:class:`ceilometer.compute.BaseComputePollster` class as defined in -the ``ceilometer/compute/__init__.py`` file. Pollsters must implement one -method: ``get_samples(self, manager, context)``, which returns a -sequence of ``Sample`` objects as defined in the -``ceilometer/sample.py`` file. - -In the ``CPUPollster`` plugin, the ``get_samples`` method is implemented as a -loop which, for each instance running on the local host, retrieves the -cpu_time from the hypervisor and sends back two ``Sample`` objects. The first -one, named "cpu", is of type "cumulative", meaning that between two polls its -value is not reset while the instance remains active; in other words, the -CPU value is always provided as a duration that continuously increases -since the creation of the instance. The second one, named "cpu_util", is of -type "gauge", meaning that its value is the percentage of CPU utilization. - -Note that the ``LOG`` calls are only used as a debugging tool and do not -participate in the actual metering activity. - -You can specify namespace(s) of pollsters to load, a list of concrete -pollsters to use, or both of these parameters when starting the polling -agent via CLI parameters: - - ceilometer-polling --polling-namespaces central compute - -This command makes the polling agent load all plugins from the -central and compute namespaces and poll everything it can. If you need to load -only some of the pollsters, you can use the ``pollster-list`` option: - - ceilometer-polling --pollster-list image image.size storage.* - -If both of these options are passed, the polling agent will load only those -pollsters specified in the pollster list that can be loaded from the selected -namespaces. - -.. note:: - - Agent coordination cannot be used when the pollster-list option is - specified. This restriction avoids both duplicated and lost samples. - -Notifications ------------- - -.. note:: - This should only be needed for cases where complex arithmetic or - non-primitive data types are used. In most cases, adding a meter - definition to the :file:`ceilometer/meter/data/meter.yaml` should - suffice. - -Notifications are defined as subclasses of the -:class:`ceilometer.agent.plugin_base.NotificationBase` base class. -Notifications must implement: - - ``event_types``, which should be a sequence of strings defining the event types to be given to the plugin, and - - ``process_notification(self, message)``, which receives an event message from the list provided to event_types and returns a sequence of Sample objects as defined in the ``ceilometer/sample.py`` file. - -The ``InstanceNotifications`` plugin, for example, listens to three events: - -* compute.instance.create.end - -* compute.instance.exists - -* compute.instance.delete.start - -using the ``get_event_type`` method; subsequently, the ``process_notification`` -method is invoked each time such events occur, generating the appropriate -sample objects to be sent to the collector.
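To make the pollster interface concrete, here is a minimal sketch of a hypothetical pollster. It assumes the ``PollsterBase`` interface from ``ceilometer/agent/plugin_base.py`` (a ``default_discovery`` property plus a ``get_samples`` method receiving a cache and the discovered resources); the class name, meter name and discovery value are illustrative, and error handling is omitted::

    from oslo_utils import timeutils

    from ceilometer.agent import plugin_base
    from ceilometer import sample


    class ExamplePollster(plugin_base.PollsterBase):
        """Hypothetical pollster emitting one gauge sample per resource."""

        @property
        def default_discovery(self):
            # Assumed: the 'local_node' discovery shipped with the agent.
            return 'local_node'

        def get_samples(self, manager, cache, resources):
            for resource in resources:
                yield sample.Sample(
                    name='example.dummy',  # illustrative meter name
                    type=sample.TYPE_GAUGE,
                    unit='unit',
                    volume=1,
                    user_id=None,
                    project_id=None,
                    resource_id=str(resource),
                    timestamp=timeutils.utcnow().isoformat(),
                    resource_metadata={},
                )

For the agent to discover such a class, it would also need an entry point registered under one of the polling namespaces (for example ``ceilometer.poll.central``) in the package's ``setup.cfg``.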
- -Adding new plugins ------------------ - -Although we have described a list of the meters Ceilometer should -collect, we cannot predict all of the ways deployers will want to -measure the resources their customers use. This means that Ceilometer -needs to be easy to extend and configure so it can be tuned for each -installation. A plugin system based on `setuptools entry points`_ -makes it easy to add new monitors in the agents. In particular, -Ceilometer now uses Stevedore_, and you should put your entry point -definitions in the ``entry_points.txt`` file of your Ceilometer egg. - -.. _setuptools entry points: http://pythonhosted.org/setuptools/setuptools.html#dynamic-discovery-of-services-and-plugins - -.. _Stevedore: http://stevedore.readthedocs.org - -Installing a plugin automatically activates it the next time the -ceilometer daemon starts. Rather than running and reporting errors or -simply consuming cycles for no-ops, plugins may disable themselves at -runtime based on configuration settings defined by other components (for example, the -plugin for polling libvirt does not run if it sees that the system is -configured using some other virtualization tool). Additionally, if no -valid resources can be discovered, the plugin will be disabled. - - -Tests -===== -Any new plugin or agent contribution will only be accepted into the project if -provided together with unit tests. Those are defined for the compute agent -plugins in the directory ``tests/compute`` and for the agent itself in ``test/agent``. -Unit tests are run in a continuous integration process for each commit made to -the project, thus ensuring as best as possible that a given patch has no side -effects on the rest of the project. diff --git a/doc/source/releasenotes/folsom.rst b/doc/source/releasenotes/folsom.rst deleted file mode 100644 index 5b08b872..00000000 --- a/doc/source/releasenotes/folsom.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. - Copyright 2012 Nicolas Barcet for Canonical - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -.. _folsom: - -==================== -Folsom -==================== - -This is the first release (Version 0.1) of Ceilometer. Please take all appropriate -caution in using it, as it is a technology preview at this time. - -Version of OpenStack - It is currently tested to work with OpenStack 2012.2 Folsom. Due to its use of - openstack-common, and the modifications that were made in terms of notifications - to many other components (glance, cinder, quantum), it will not easily work - with any prior version of OpenStack. - -Components - Currently covered components are: Nova, Nova-network, Glance, Cinder and - Quantum. Notably, there is no support yet for Swift, and it was decided not - to support nova-volume in favor of Cinder. A detailed list of meters covered - per component can be found in :ref:`measurements`. - -Nova with libvirt only - Most of the Nova meters will only work with libvirt-fronted hypervisors at the - moment, and our test coverage was mostly done on KVM.
Contributors are welcome - to implement other virtualization backends' meters. - -Quantum delete events - Quantum delete notifications do not include the same metadata as the other - messages, so we ignore them for now. This isn't ideal, since it may mean we - miss charging for some amount of time, but it is better than throwing away the - existing metadata for a resource when it is deleted. - -Database backend - The only tested and complete database backend is currently MongoDB; the - SQLAlchemy one is still a work in progress. - -Installation - The current best source of information on how to deploy this project is - the devstack implementation, but feel free to come to #openstack-metering on - freenode for more info. - -Volume of data - Please note that metering can generate lots of data very quickly. Have a look - at the following spreadsheet to evaluate what you will end up with. - - http://wiki.openstack.org/EfficientMetering#Volume_of_data diff --git a/doc/source/releasenotes/index.rst b/doc/source/releasenotes/index.rst deleted file mode 100644 index 22a47d88..00000000 --- a/doc/source/releasenotes/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. - Copyright 2012 New Dream Network, LLC (DreamHost) - - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============================ - Release Notes -============================ - -.. toctree:: - :hidden: - - folsom - -* :ref:`folsom` -* `Havana`_ -* `Icehouse`_ -* `Juno`_ -* `Kilo`_ -* `Liberty`_ - -Since the Mitaka development cycle, release notes are hosted on -`Ceilometer Release Notes`_. - -.. _Havana: https://wiki.openstack.org/wiki/ReleaseNotes/Havana#OpenStack_Metering_.28Ceilometer.29 -.. _IceHouse: https://wiki.openstack.org/wiki/ReleaseNotes/Icehouse#OpenStack_Telemetry_.28Ceilometer.29 -.. _Juno: https://wiki.openstack.org/wiki/ReleaseNotes/Juno#OpenStack_Telemetry_.28Ceilometer.29 -.. _Kilo: https://wiki.openstack.org/wiki/ReleaseNotes/Kilo#OpenStack_Telemetry_.28Ceilometer.29 -.. _Liberty: https://wiki.openstack.org/wiki/ReleaseNotes/Liberty#OpenStack_Telemetry_.28Ceilometer.29 -.. _Ceilometer Release Notes: http://docs.openstack.org/releasenotes/ceilometer/ diff --git a/doc/source/webapi/v2.rst b/doc/source/webapi/v2.rst index aa78256d..ebfbe686 100644 --- a/doc/source/webapi/v2.rst +++ b/doc/source/webapi/v2.rst @@ -4,81 +4,6 @@ V2 Web API ============ -Resources -========= - -.. rest-controller:: ceilometer.api.controllers.v2.resources:ResourcesController - :webprefix: /v2/resources - -.. autotype:: ceilometer.api.controllers.v2.resources.Resource - :members: - -Meters -====== - -.. rest-controller:: ceilometer.api.controllers.v2.meters:MetersController - :webprefix: /v2/meters - -.. rest-controller:: ceilometer.api.controllers.v2.meters:MeterController - :webprefix: /v2/meters - -.. autotype:: ceilometer.api.controllers.v2.meters.Meter - :members: - -.. autotype:: ceilometer.api.controllers.v2.meters.OldSample - :members: - -Samples and Statistics -====================== - -..
rest-controller:: ceilometer.api.controllers.v2.samples:SamplesController - :webprefix: /v2/samples - -.. autotype:: ceilometer.api.controllers.v2.samples.Sample - :members: - -.. autotype:: ceilometer.api.controllers.v2.meters.Statistics - :members: - -When a simple statistics request is invoked (using GET /v2/meters/<meter_name>/statistics), -it will return the standard set of *Statistics*: *avg*, *sum*, *min*, *max*, and *count*. - -.. note:: - - If using Ceilometer data for statistics, it's recommended to use a backend - such as Gnocchi_ rather than Ceilometer's interface. Gnocchi is designed - specifically for this use case by providing a light-weight, aggregated model. - As they manage data differently, the API models returned by Ceilometer and Gnocchi - are different. The Gnocchi API can be found here_. - -.. _Gnocchi: http://docs.openstack.org/developer/gnocchi/ -.. _here: http://docs.openstack.org/developer/gnocchi/rest.html - -Selectable Aggregates -+++++++++++++++++++++ - -The Statistics API has been extended to include the aggregate functions -*stddev* and *cardinality*. You can explicitly select these functions or any -from the standard set by specifying an aggregate function in the statistics -query:: - - GET /v2/meters/<meter_name>/statistics?aggregate.func=<name>&aggregate.param=<value> - -(where aggregate.param is optional). - -Duplicate aggregate function and parameter pairs are silently discarded from the statistics query. Partial duplicates, in the sense of the same function but differing parameters, for example:: - - GET /v2/meters/<meter_name>/statistics?aggregate.func=cardinality&aggregate.param=resource_id&aggregate.func=cardinality&aggregate.param=project_id - -are, on the other hand, both allowed by the API and supported by the storage drivers. See the :ref:`functional-examples` section for more detail. - -.. note:: - - Currently only *cardinality* needs aggregate.param to be specified. - -.. autotype:: ceilometer.api.controllers.v2.meters.Aggregate - :members: - Capabilities ============ @@ -163,518 +88,3 @@ applied on trait. See :ref:`api-queries` for how to query the API. .. autotype:: ceilometer.api.controllers.v2.events.EventQuery :members: - -Complex Query -+++++++++++++ - -The filter expressions of the Complex Query feature operate on the fields -of *Sample*. The following comparison operators are -supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical -operators can be used: *and*, *or* and *not*. The field names are validated -against the database models. See :ref:`api-queries` for how to query the API. - -.. note:: - - The *not* operator has a different meaning in MongoDB and in a SQL DB engine. - If the *not* operator is applied on a non-existent metadata field, the - result depends on the DB engine. For example, if the - {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a query, - MongoDB will return every Sample object, as the *not* operator evaluates to true - for every Sample where the given field does not exist. See more in the MongoDB doc. - On the other hand, a SQL-based DB engine will return an empty result, as the join - operation on the metadata table returns zero rows: the ON clause of the join, - which tries to match on the metadata field name, is never fulfilled. - -Complex Query supports defining the list of orderby expressions in the form -of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. - -The number of the returned items can be bounded using the *limit* option. - -The *filter*, *orderby* and *limit* are all optional fields in a query. - -..
rest-controller:: ceilometer.api.controllers.v2.query:QuerySamplesController - :webprefix: /v2/query/samples - -.. autotype:: ceilometer.api.controllers.v2.query.ComplexQuery - :members: - -Links -===== - -.. autotype:: ceilometer.api.controllers.v2.base.Link - :members: - -API and CLI query examples -========================== - -CLI Queries -+++++++++++ - -Ceilometer CLI Commands:: - - $ ceilometer --debug --os-username <username> --os-password <password> --os-auth-url http://localhost:5000/v2.0/ --os-tenant-name admin meter-list - -.. note:: - - The *username*, *password*, and *tenant-name* options are required to be - present in these arguments or specified via environment variables. Note that - the in-line arguments will override the environment variables. - -.. _api-queries: - -API Queries -+++++++++++ - -Ceilometer API calls: - -.. note:: - - To successfully query Ceilometer you must first get a project-specific - token from the Keystone service and add it to any API calls that you - execute against that project. See the - `OpenStack credentials documentation `_ - for additional details. - -A simple query to return a list of available meters:: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters" - -A query to return the list of resources:: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/resources" - -A query to return the list of samples, limited to a specific meter type:: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/disk.root.size" - -A query using filters (see: `query filter section `_):: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.delete.start" - -Additional examples:: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/disk.root.size?q.field=resource_id&q.op=eq&q.value=<resource_id>" - -or:: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.exists" - -You can specify multiple filters by using an array of queries (order matters):: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/instance"\ - "?q.field=metadata.event_type&q.value=compute.instance.exists"\ - "&q.field=timestamp&q.op=gt&q.value=2013-07-03T13:34:17" - -A query to find the maximum value and standard deviation (*max*, *stddev*) of -the CPU utilization for a given instance (identified by *resource_id*):: - - curl -H 'X-Auth-Token: <token>' \ - "http://localhost:8777/v2/meters/cpu_util/statistics?aggregate.func=max&aggregate.func=stddev"\ - "&q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980" - -.. note:: - - If any of the requested aggregates are not supported by the storage driver, - an HTTP 400 error code will be returned along with an appropriate error - message. - -JSON-based example:: - - curl -X GET -H "X-Auth-Token: <token>" -H "Content-Type: application/json" - -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}]}' - http://localhost:8777/v2/meters/instance - -JSON-based example with multiple filters:: - - curl -X GET -H "X-Auth-Token: <token>" -H "Content-Type: application/json" - -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}, - {"field": "resource_id", "op": "eq", "value": "4da2b992-0dc3-4a7c-a19a-d54bf918de41"}]}' - http://localhost:8777/v2/meters/instance - -.. _functional-examples: - -Functional examples -+++++++++++++++++++ - -The examples below are meant to help you understand how to query the -Ceilometer API to build custom meter reports.
The query parameters should -be encoded using one of the above methods, e.g. as URL parameters or -as JSON-encoded data passed to the GET request. - -Get the list of samples about instances running for June 2013:: - - GET /v2/meters/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}] - - -Get the list of samples about instances running for June 2013 for a particular -project:: - - GET /v2/meters/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -Now you may want to have statistics on the meters you are targeting. -Consider the following example where you are getting the list of samples -about CPU utilization of a given instance (identified by its *resource_id*) -running for June 2013:: - - GET /v2/meters/cpu_util - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - -You can have statistics on the list of samples requested (*avg*, *sum*, *max*, -*min*, *count*) computed on the full duration:: - - GET /v2/meters/cpu_util/statistics - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - -You may want to aggregate samples over a given period (10 minutes for -example) in order to get an array of the statistics computed on smaller -durations:: - - GET /v2/meters/cpu_util/statistics - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}] - period: 600 - -The *period* parameter aggregates by time range. You can also aggregate by -field using the *groupby* parameter. Currently, the *user_id*, *resource_id*, -*project_id*, and *source* fields are supported. Below is an example that uses -a query filter and group by aggregation on *project_id* and *resource_id*:: - - GET /v2/meters/instance/statistics - q: [{"field": "user_id", - "op": "eq", - "value": "user-2"}, - {"field": "source", - "op": "eq", - "value": "source-1"}] - groupby: ["project_id", "resource_id"] - -The statistics will be returned in a list, and each entry of the list will be -labeled with the group name. For the previous example, the first entry might -have *project_id* be "project-1" and *resource_id* be "resource-1", the second -entry might have *project_id* be "project-1" and *resource_id* be "resource-2", and -so on. - -You can request both period and group by aggregation in the same query:: - - GET /v2/meters/instance/statistics - q: [{"field": "source", - "op": "eq", - "value": "source-1"}] - groupby: ["project_id"] - period: 7200 - -Note that period aggregation is applied first, followed by group by -aggregation. Order matters because the period aggregation determines the time -ranges for the statistics.
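As an illustration, the period-based statistics request above can be issued from Python as JSON-encoded data; below is a minimal sketch using the third-party ``requests`` library, where the endpoint URL and the ``<token>`` value are assumptions about your deployment::

    import json

    import requests

    headers = {'X-Auth-Token': '<token>',
               'Content-Type': 'application/json'}
    query = {
        'q': [{'field': 'timestamp', 'op': 'ge',
               'value': '2013-06-01T00:00:00'},
              {'field': 'timestamp', 'op': 'lt',
               'value': '2013-07-01T00:00:00'},
              {'field': 'resource_id', 'op': 'eq',
               'value': '64da755c-9120-4236-bee1-54acafe24980'}],
        'period': 600,
    }
    # A GET request carrying the query as a JSON body, as described above.
    resp = requests.get('http://localhost:8777/v2/meters/cpu_util/statistics',
                        headers=headers, data=json.dumps(query))
    for stat in resp.json():
        print(stat['period_start'], stat['avg'])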
- -Below is a real-life query:: - - GET /v2/meters/image/statistics - groupby: ["project_id", "resource_id"] - -With the return values:: - - [{"count": 4, "duration_start": "2013-09-18T19:08:33", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1137.0, - "period_start": "2013-09-18T19:08:33", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "551f495f-7f49-4624-a34c-c422f2c5f90b"}, - "unit": "image"}, - {"count": 4, "duration_start": "2013-09-18T19:08:36", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1134.0, - "period_start": "2013-09-18T19:08:36", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "7c1157ed-cf30-48af-a868-6c7c3ad7b531"}, - "unit": "image"}, - {"count": 4, "duration_start": "2013-09-18T19:08:34", "min": 1.0, - "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0, - "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1136.0, - "period_start": "2013-09-18T19:08:34", "avg": 1.0, - "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78", - "resource_id": "eaed9cf4-fc99-4115-93ae-4a5c37a1a7d7"}, - "unit": "image"}] - -You can request specific aggregate functions as well. For example, if you only -want the average CPU utilization, the GET request would look like this:: - - GET /v2/meters/cpu_util/statistics?aggregate.func=avg - -Use the same syntax to access the aggregate functions not in the standard set, -e.g. *stddev* and *cardinality*. A request for the standard deviation of CPU utilization would take the form:: - - GET /v2/meters/cpu_util/statistics?aggregate.func=stddev - -and would give a response such as the following:: - - [{"aggregate": {"stddev":0.6858829535841072}, - "duration_start": "2014-01-30T11:13:23", - "duration_end": "2014-01-31T16:07:13", - "duration": 104030.0, - "period": 0, - "period_start": "2014-01-30T11:13:23", - "period_end": "2014-01-31T16:07:13", - "groupby": null, - "unit" : "%"}] - -The request syntax is similar for *cardinality* but with the aggregate.param -option provided. So, for example, if you want to know the number of distinct -tenants with images, you would do:: - - GET /v2/meters/image/statistics?aggregate.func=cardinality - &aggregate.param=project_id - -For a more involved example, consider a requirement for determining, for some -tenant, the number of distinct instances (*cardinality*) as well as the total -number of instance samples (*count*). You might also want to see this -information at 15-minute intervals.
Then, using the *period* and -*groupby* options, a query would look like the following:: - - GET /v2/meters/instance/statistics?aggregate.func=cardinality - &aggregate.param=resource_id - &aggregate.func=count - &groupby=project_id&period=900 - -This would give an example response of the form:: - - [{"count": 19, - "aggregate": {"count": 19.0, "cardinality/resource_id": 3.0}, - "duration": 328.478029, - "duration_start": "2014-01-31T10:00:41.823919", - "duration_end": "2014-01-31T10:06:10.301948", - "period": 900, - "period_start": "2014-01-31T10:00:00", - "period_end": "2014-01-31T10:15:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}, - {"count": 22, - "aggregate": {"count": 22.0, "cardinality/resource_id": 4.0}, - "duration": 808.00384, - "duration_start": "2014-01-31T10:15:15", - "duration_end": "2014-01-31T10:28:43.003840", - "period": 900, - "period_start": "2014-01-31T10:15:00", - "period_end": "2014-01-31T10:30:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}, - {"count": 2, - "aggregate": {"count": 2.0, "cardinality/resource_id": 2.0}, - "duration": 0.0, - "duration_start": "2014-01-31T10:35:15", - "duration_end": "2014-01-31T10:35:15", - "period": 900, - "period_start": "2014-01-31T10:30:00", - "period_end": "2014-01-31T10:45:00", - "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"}, - "unit": "instance"}] - -If you want to retrieve all the instances (not the list of samples, but the -resources themselves) that have been run during this month for a given project, -you should ask the resource endpoint for the list of resources (all types: -including storage, images, networking, ...):: - - GET /v2/resources - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -Then look for resources that have an *instance* meter linked to them. That -will indicate resources that have been measured as instances. You can -then request their samples to have more detailed information, like their -state or their flavor:: - - GET /v2/meter/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - -This will return a list of samples that have been recorded on this -particular resource. You can inspect them to retrieve information, such as -the instance state (check the *metadata.vm_state* field) or the instance -flavor (check the *metadata.flavor* field). -You can request nested metadata fields by using a dot to delimit the fields -(e.g. *metadata.weighted_host.host* for the *instance.scheduled* meter). - -To retrieve only the last 3 samples of a meter, you can pass the *limit* -parameter to the query:: - - GET /v2/meter/instance - q: [{"field": "timestamp", - "op": "ge", - "value": "2013-06-01T00:00:00"}, - {"field": "timestamp", - "op": "lt", - "value": "2013-07-01T00:00:00"}, - {"field": "resource_id", - "op": "eq", - "value": "64da755c-9120-4236-bee1-54acafe24980"}, - {"field": "project_id", - "op": "eq", - "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}] - limit: 3 - -This query would only return the last 3 samples.
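The same kind of request can also be issued from Python with the filters encoded as repeated ``q.field``/``q.op``/``q.value`` URL parameters rather than a JSON body; below is a minimal sketch using the third-party ``requests`` library (the endpoint URL and ``<token>`` are assumptions about your deployment)::

    import requests

    # Filter order matters, so pass the parameters as an ordered list
    # of tuples; each filter contributes a q.field/q.op/q.value triple.
    params = [('q.field', 'resource_id'),
              ('q.op', 'eq'),
              ('q.value', '64da755c-9120-4236-bee1-54acafe24980'),
              ('limit', '3')]
    resp = requests.get('http://localhost:8777/v2/meters/instance',
                        headers={'X-Auth-Token': '<token>'}, params=params)
    for s in resp.json():
        print(s['timestamp'], s['counter_volume'])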
- -Functional example for Complex Query -++++++++++++++++++++++++++++++++++++ - -This example demonstrates how complex query filter expressions can be generated and sent -to the /v2/query/samples endpoint of the Ceilometer API using a POST request. - -To check for *cpu_util* samples reported between 18:00-18:15 or between 18:30 - 18:45 -on a particular date (2013-12-01), where the utilization is between 23 and 26 percent, -but not exactly 25.12 percent, the following filter expression can be created:: - - {"and": - [{"and": - [{"=": {"counter_name": "cpu_util"}}, - {">": {"counter_volume": 0.23}}, - {"<": {"counter_volume": 0.26}}, - {"not": {"=": {"counter_volume": 0.2512}}}]}, - {"or": - [{"and": - [{">": {"timestamp": "2013-12-01T18:00:00"}}, - {"<": {"timestamp": "2013-12-01T18:15:00"}}]}, - {"and": - [{">": {"timestamp": "2013-12-01T18:30:00"}}, - {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]} - -Different sorting criteria can be defined for the query filter; for example, the results -can be ordered in ascending order by *counter_volume* and in descending order by -*timestamp*. The following orderby expression has to be created to specify these -criteria:: - - [{"counter_volume": "ASC"}, {"timestamp": "DESC"}] - -As the current implementation accepts only string values as query filter and orderby -definitions, the expressions defined above have to be converted to string values. -By adding a limit criterion to the request, which caps the number of returned samples -at four, the query looks like the following:: - - { - "filter" : "{\"and\":[{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.23}}, {\"<\": {\"counter_volume\": 0.26}}, {\"not\": {\"=\": {\"counter_volume\": 0.2512}}}]}, {\"or\": [{\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:00:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:15:00\"}}]}, {\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:30:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:45:00\"}}]}]}]}", - "orderby" : "[{\"counter_volume\": \"ASC\"}, {\"timestamp\": \"DESC\"}]", - "limit" : 4 - } - -A query request looks like the following with curl:: - - curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \ - -d '<query>' \ - http://localhost:8777/v2/query/samples - -.. _user-defined-data: - -User-defined data -+++++++++++++++++ - -It is possible to add your own samples (created from data retrieved in any -way, such as monitoring agents on your instances) to Ceilometer, to store -and query them. You can even get *Statistics* on your own inserted data. -By adding a *Sample* to a *Resource*, you automatically create the corresponding -*Meter* if it does not exist already. To achieve this, you have to POST a list -of one to many samples in JSON format:: - - curl -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \ - -d '<sample_list>' \ - http://localhost:8777/v2/meters/<meter_name> - -Fields *source*, *timestamp*, *project_id* and *user_id* are automatically -added if not present in the samples. Field *message_id* is not taken into -account if present; an internal value will be set instead. - -By default, samples posted via API will be placed on the notification bus and -processed by the notification agent. - -To avoid re-queuing the data, samples posted via API can be stored directly to -the storage backend verbatim by specifying a boolean flag 'direct' in the -request URL, like this:: - - POST /v2/meters/ram_util?direct=True - -Samples posted this way will bypass pipeline processing.
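Before the raw REST body shown next, here is a minimal Python sketch of such a POST using the third-party ``requests`` library; the endpoint URL and the ``<token>`` value are assumptions about your deployment, while the sample fields are taken from the surrounding examples::

    import requests

    samples = [{
        'counter_name': 'ram_util',
        'counter_type': 'gauge',
        'counter_unit': '%',
        'counter_volume': 8.57,
        'resource_id': '87acaca4-ae45-43ae-ac91-846d8d96a89b',
    }]
    # direct=True stores the samples verbatim, bypassing the pipeline.
    resp = requests.post('http://localhost:8777/v2/meters/ram_util',
                         params={'direct': 'True'},
                         headers={'X-Auth-Token': '<token>'},
                         json=samples)
    print(resp.json())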
- -Here is an example showing how to add a sample for a *ram_util* meter (already -existing or not):: - - POST /v2/meters/ram_util - body: [ - { - "counter_name": "ram_util", - "user_id": "4790fbafad2e44dab37b1d7bfc36299b", - "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b", - "resource_metadata": { - "display_name": "my_instance", - "my_custom_metadata_1": "value1", - "my_custom_metadata_2": "value2" - }, - "counter_unit": "%", - "counter_volume": 8.57762938230384, - "project_id": "97f9a6aaa9d842fcab73797d3abb2f53", - "counter_type": "gauge" - } - ] - -You get back the same list, with your sample completed by the missing -fields: *source* and *timestamp* in this case. diff --git a/etc/apache2/ceilometer b/etc/apache2/ceilometer deleted file mode 100644 index 261acc3e..00000000 --- a/etc/apache2/ceilometer +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) 2013 New Dream Network, LLC (DreamHost) -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# This is an example Apache2 configuration file for using the -# ceilometer API through mod_wsgi. - -# Note: If you are using a Debian-based system then the paths -# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead -# of "httpd". -# -# The number of processes and threads is an example only and should -# be adjusted according to local requirements.
- -Listen 8777 - -<VirtualHost *:8777> - WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP} - WSGIProcessGroup ceilometer-api - WSGIScriptAlias / /var/www/ceilometer/app - WSGIApplicationGroup %{GLOBAL} - <IfVersion >= 2.4> - ErrorLogFormat "%{cu}t %M" - </IfVersion> - ErrorLog /var/log/httpd/ceilometer_error.log - CustomLog /var/log/httpd/ceilometer_access.log combined -</VirtualHost> - -WSGISocketPrefix /var/run/httpd diff --git a/etc/ceilometer/README-ceilometer.conf.txt b/etc/ceilometer/README-ceilometer.conf.txt deleted file mode 100644 index db6d857e..00000000 --- a/etc/ceilometer/README-ceilometer.conf.txt +++ /dev/null @@ -1,4 +0,0 @@ -To generate the sample ceilometer.conf file, run the following -command from the top-level ceilometer directory: - -tox -egenconfig \ No newline at end of file diff --git a/etc/ceilometer/ceilometer-config-generator.conf b/etc/ceilometer/ceilometer-config-generator.conf index 4fc80454..eaf240c1 100644 --- a/etc/ceilometer/ceilometer-config-generator.conf +++ b/etc/ceilometer/ceilometer-config-generator.conf @@ -2,11 +2,8 @@ output_file = etc/ceilometer/ceilometer.conf wrap_width = 79 namespace = ceilometer -namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log -namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy -namespace = oslo.service.service namespace = keystonemiddleware.auth_token diff --git a/etc/ceilometer/event_definitions.yaml b/etc/ceilometer/event_definitions.yaml deleted file mode 100644 index 784b4060..00000000 --- a/etc/ceilometer/event_definitions.yaml +++ /dev/null @@ -1,545 +0,0 @@ ---- -- event_type: compute.instance.* - traits: &instance_traits - tenant_id: - fields: payload.tenant_id - user_id: - fields: payload.user_id - instance_id: - fields: payload.instance_id - host: - fields: publisher_id.`split(., 1, 1)` - service: - fields: publisher_id.`split(., 0, -1)` - memory_mb: - type: int - fields: payload.memory_mb - disk_gb: - type: int - fields: payload.disk_gb - root_gb: - type: int - fields: payload.root_gb - ephemeral_gb: - type: int - fields: payload.ephemeral_gb - vcpus: - type: int - fields: payload.vcpus - instance_type_id: - type: int - fields: payload.instance_type_id - instance_type: - fields: payload.instance_type - state: - fields: payload.state - os_architecture: - fields: payload.image_meta.'org.openstack__1__architecture' - os_version: - fields: payload.image_meta.'org.openstack__1__os_version' - os_distro: - fields: payload.image_meta.'org.openstack__1__os_distro' - launched_at: - type: datetime - fields: payload.launched_at - deleted_at: - type: datetime - fields: payload.deleted_at -- event_type: compute.instance.exists - traits: - <<: *instance_traits - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] - traits: &cinder_traits - user_id: - fields: payload.user_id - project_id: - fields: payload.tenant_id - availability_zone: - fields: payload.availability_zone - display_name: - fields: payload.display_name - replication_status: - fields: payload.replication_status - status: - fields: payload.status - created_at: - fields: payload.created_at -- event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*',
'volume.detach.*', 'volume.update.*'] - traits: - <<: *cinder_traits - resource_id: - fields: payload.volume_id - host: - fields: payload.host - size: - fields: payload.size - type: - fields: payload.volume_type - replication_status: - fields: payload.replication_status -- event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] - traits: - <<: *cinder_traits - resource_id: - fields: payload.snapshot_id - volume_id: - fields: payload.volume_id -- event_type: ['image_volume_cache.*'] - traits: - image_id: - fields: payload.image_id - host: - fields: payload.host -- event_type: ['image.update', 'image.upload', 'image.delete'] - traits: &glance_crud - project_id: - fields: payload.owner - resource_id: - fields: payload.id - name: - fields: payload.name - status: - fields: payload.status - created_at: - fields: payload.created_at - user_id: - fields: payload.owner - deleted_at: - fields: payload.deleted_at - size: - fields: payload.size -- event_type: image.send - traits: &glance_send - receiver_project: - fields: payload.receiver_tenant_id - receiver_user: - fields: payload.receiver_user_id - user_id: - fields: payload.owner_id - image_id: - fields: payload.image_id - destination_ip: - fields: payload.destination_ip - bytes_sent: - fields: payload.bytes_sent -- event_type: orchestration.stack.* - traits: &orchestration_crud - project_id: - fields: payload.tenant_id - user_id: - fields: ['_context_trustor_user_id', '_context_user_id'] - resource_id: - fields: payload.stack_identity -- event_type: sahara.cluster.* - traits: &sahara_crud - project_id: - fields: payload.project_id - user_id: - fields: _context_user_id - resource_id: - fields: payload.cluster_id -- event_type: sahara.cluster.health - traits: &sahara_health - <<: *sahara_crud - verification_id: - fields: payload.verification_id - health_check_status: - fields: payload.health_check_status - health_check_name: - fields: payload.health_check_name - health_check_description: - fields: payload.health_check_description - created_at: - type: datetime - fields: payload.created_at - updated_at: - type: datetime - fields: payload.updated_at -- event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', - 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] - traits: &identity_crud - resource_id: - fields: payload.resource_info - initiator_id: - fields: payload.initiator.id - project_id: - fields: payload.initiator.project_id - domain_id: - fields: payload.initiator.domain_id -- event_type: identity.role_assignment.* - traits: &identity_role_assignment - role: - fields: payload.role - group: - fields: payload.group - domain: - fields: payload.domain - user: - fields: payload.user - project: - fields: payload.project -- event_type: identity.authenticate - traits: &identity_authenticate - typeURI: - fields: payload.typeURI - id: - fields: payload.id - action: - fields: payload.action - eventType: - fields: payload.eventType - eventTime: - fields: payload.eventTime - outcome: - fields: payload.outcome - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_name: - fields: payload.initiator.name - initiator_host_agent: - fields: payload.initiator.host.agent - initiator_host_addr: - fields: payload.initiator.host.address - target_typeURI: - fields: payload.target.typeURI - target_id: - fields: payload.target.id - observer_typeURI: - fields: 
payload.observer.typeURI - observer_id: - fields: payload.observer.id -- event_type: objectstore.http.request - traits: &objectstore_request - typeURI: - fields: payload.typeURI - id: - fields: payload.id - action: - fields: payload.action - eventType: - fields: payload.eventType - eventTime: - fields: payload.eventTime - outcome: - fields: payload.outcome - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_project_id: - fields: payload.initiator.project_id - target_typeURI: - fields: payload.target.typeURI - target_id: - fields: payload.target.id - target_action: - fields: payload.target.action - target_metadata_path: - fields: payload.target.metadata.path - target_metadata_version: - fields: payload.target.metadata.version - target_metadata_container: - fields: payload.target.metadata.container - target_metadata_object: - fields: payload.target.metadata.object - observer_id: - fields: payload.observer.id -- event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'pool.*', 'vip.*', 'member.*', 'health_monitor.*', 'healthmonitor.*', 'listener.*', 'loadbalancer.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] - traits: &network_traits - user_id: - fields: _context_user_id - project_id: - fields: _context_tenant_id -- event_type: network.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.network.id', 'payload.id'] -- event_type: subnet.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.subnet.id', 'payload.id'] -- event_type: port.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.port.id', 'payload.id'] -- event_type: router.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.router.id', 'payload.id'] -- event_type: floatingip.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.floatingip.id', 'payload.id'] -- event_type: pool.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.pool.id', 'payload.id'] -- event_type: vip.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.vip.id', 'payload.id'] -- event_type: member.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.member.id', 'payload.id'] -- event_type: health_monitor.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.health_monitor.id', 'payload.id'] -- event_type: healthmonitor.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.healthmonitor.id', 'payload.id'] -- event_type: listener.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.listener.id', 'payload.id'] -- event_type: loadbalancer.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.loadbalancer.id', 'payload.id'] -- event_type: firewall.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall.id', 'payload.id'] -- event_type: firewall_policy.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall_policy.id', 'payload.id'] -- event_type: firewall_rule.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.firewall_rule.id', 'payload.id'] -- event_type: vpnservice.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.vpnservice.id', 'payload.id'] -- event_type: ipsecpolicy.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.ipsecpolicy.id', 'payload.id'] -- event_type: ikepolicy.* - traits: - <<: *network_traits - 
resource_id: - fields: ['payload.ikepolicy.id', 'payload.id'] -- event_type: ipsec_site_connection.* - traits: - <<: *network_traits - resource_id: - fields: ['payload.ipsec_site_connection.id', 'payload.id'] -- event_type: '*http.*' - traits: &http_audit - project_id: - fields: payload.initiator.project_id - user_id: - fields: payload.initiator.id - typeURI: - fields: payload.typeURI - eventType: - fields: payload.eventType - action: - fields: payload.action - outcome: - fields: payload.outcome - id: - fields: payload.id - eventTime: - fields: payload.eventTime - requestPath: - fields: payload.requestPath - observer_id: - fields: payload.observer.id - target_id: - fields: payload.target.id - target_typeURI: - fields: payload.target.typeURI - target_name: - fields: payload.target.name - initiator_typeURI: - fields: payload.initiator.typeURI - initiator_id: - fields: payload.initiator.id - initiator_name: - fields: payload.initiator.name - initiator_host_address: - fields: payload.initiator.host.address -- event_type: '*http.response' - traits: - <<: *http_audit - reason_code: - fields: payload.reason.reasonCode -- event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] - traits: &dns_domain_traits - status: - fields: payload.status - retry: - fields: payload.retry - description: - fields: payload.description - expire: - fields: payload.expire - email: - fields: payload.email - ttl: - fields: payload.ttl - action: - fields: payload.action - name: - fields: payload.name - resource_id: - fields: payload.id - created_at: - fields: payload.created_at - updated_at: - fields: payload.updated_at - version: - fields: payload.version - parent_domain_id: - fields: parent_domain_id - serial: - fields: payload.serial -- event_type: dns.domain.exists - traits: - <<: *dns_domain_traits - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: trove.* - traits: &trove_base_traits - state: - fields: payload.state_description - instance_type: - fields: payload.instance_type - user_id: - fields: payload.user_id - resource_id: - fields: payload.instance_id - instance_type_id: - fields: payload.instance_type_id - launched_at: - type: datetime - fields: payload.launched_at - instance_name: - fields: payload.instance_name - state: - fields: payload.state - nova_instance_id: - fields: payload.nova_instance_id - service_id: - fields: payload.service_id - created_at: - type: datetime - fields: payload.created_at - region: - fields: payload.region -- event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] - traits: &trove_common_traits - name: - fields: payload.name - availability_zone: - fields: payload.availability_zone - instance_size: - type: int - fields: payload.instance_size - volume_size: - type: int - fields: payload.volume_size - nova_volume_id: - fields: payload.nova_volume_id -- event_type: trove.instance.create - traits: - <<: [*trove_base_traits, *trove_common_traits] -- event_type: trove.instance.modify_volume - traits: - <<: [*trove_base_traits, *trove_common_traits] - old_volume_size: - type: int - fields: payload.old_volume_size - modify_at: - type: datetime - fields: payload.modify_at -- event_type: trove.instance.modify_flavor - traits: - <<: [*trove_base_traits, *trove_common_traits] - old_instance_size: - type: int - fields: payload.old_instance_size - modify_at: - type: datetime - 
fields: payload.modify_at -- event_type: trove.instance.delete - traits: - <<: [*trove_base_traits, *trove_common_traits] - deleted_at: - type: datetime - fields: payload.deleted_at -- event_type: trove.instance.exists - traits: - <<: *trove_base_traits - display_name: - fields: payload.display_name - audit_period_beginning: - type: datetime - fields: payload.audit_period_beginning - audit_period_ending: - type: datetime - fields: payload.audit_period_ending -- event_type: profiler.* - traits: - project: - fields: payload.project - service: - fields: payload.service - name: - fields: payload.name - base_id: - fields: payload.base_id - trace_id: - fields: payload.trace_id - parent_id: - fields: payload.parent_id - timestamp: - fields: payload.timestamp - host: - fields: payload.info.host - path: - fields: payload.info.request.path - query: - fields: payload.info.request.query - method: - fields: payload.info.request.method - scheme: - fields: payload.info.request.scheme - db.statement: - fields: payload.info.db.statement - db.params: - fields: payload.info.db.params diff --git a/etc/ceilometer/event_pipeline.yaml b/etc/ceilometer/event_pipeline.yaml deleted file mode 100644 index a91c46a1..00000000 --- a/etc/ceilometer/event_pipeline.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -sources: - - name: event_source - events: - - "*" - sinks: - - event_sink -sinks: - - name: event_sink - transformers: - publishers: - - notifier:// diff --git a/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml b/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml deleted file mode 100644 index 8d009fcd..00000000 --- a/etc/ceilometer/examples/loadbalancer_v2_meter_definitions.yaml +++ /dev/null @@ -1,265 +0,0 @@ -metric: - # LBaaS V2 - - name: "loadbalancer.create" - event_type: - - "loadbalancer.create.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "loadbalancer.update" - event_type: - - "loadbalancer.update.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "loadbalancer.delete" - event_type: - - "loadbalancer.delete.end" - type: "delta" - unit: "loadbalancer" - volume: 1 - resource_id: $.payload.loadbalancer.id - project_id: $.payload.loadbalancer.tenant_id - metadata: - name: $.payload.loadbalancer.name - description: $.payload.loadbalancer.description - listeners: $.payload.loadbalancer.listeners - operating_status: $.payload.loadbalancer.operating_status - vip_address: $.payload.loadbalancer.vip_address - vip_subnet_id: $.payload.loadbalancer.vip_subnet_id - admin_state_up: $.payload.loadbalancer.admin_state_up - - - name: "listener.create" - event_type: - - 
"listener.create.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "listener.update" - event_type: - - "listener.update.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "listener.delete" - event_type: - - "listener.delete.end" - type: "delta" - unit: "listener" - volume: 1 - resource_id: $.payload.listener.id - project_id: $.payload.listener.tenant_id - metadata: - name: $.payload.listener.name - description: $.payload.listener.description - admin_state_up: $.payload.listener.admin_state_up - loadbalancers: $.payload.listener.loadbalancers - default_pool_id: $.payload.listener.default_pool_id - protocol: $.payload.listener.protocol - connection_limit: $.payload.listener.connection_limit - - - name: "healthmonitor.create" - event_type: - - "healthmonitor.create.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "healthmonitor.update" - event_type: - - "healthmonitor.update.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "healthmonitor.delete" - event_type: - - "healthmonitor.delete.end" - type: "delta" - unit: "healthmonitor" - volume: 1 - resource_id: $.payload.healthmonitor.id - project_id: $.payload.healthmonitor.tenant_id - metadata: - name: $.payload.healthmonitor.name - description: $.payload.healthmonitor.description - admin_state_up: $.payload.healthmonitor.admin_state_up - max_retries: $.payload.healthmonitor.max_retries - delay: $.payload.healthmonitor.delay - timeout: $.payload.healthmonitor.timeout - pools: $.payload.healthmonitor.pools - type: $.payload.healthmonitor.type - - - name: "pool.create" - event_type: - - "pool.create.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - 
description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "pool.update" - event_type: - - "pool.update.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "pool.delete" - event_type: - - "pool.delete.end" - type: "delta" - unit: "pool" - volume: 1 - resource_id: $.payload.pool.id - project_id: $.payload.pool.tenant_id - metadata: - name: $.payload.pool.name - description: $.payload.pool.description - admin_state_up: $.payload.pool.admin_state_up - lb_method: $.payload.pool.lb_method - protocol: $.payload.pool.protocol - subnet_id: $.payload.pool.subnet_id - vip_id: $.payload.pool.vip_id - status: $.payload.pool.status - status_description: $.payload.pool.status_description - - - name: "member.create" - event_type: - - "member.create.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id - - - name: "member.update" - event_type: - - "member.update.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id - - - name: "member.delete" - event_type: - - "member.delete.end" - type: "delta" - unit: "member" - volume: 1 - resource_id: $.payload.member.id - project_id: $.payload.member.tenant_id - metadata: - address: $.payload.member.address - status: $.payload.member.status - status_description: $.payload.member.status_description - weight: $.payload.member.weight - admin_state_up: $.payload.member.admin_state_up - protocol_port: $.payload.member.protocol_port - pool_id: $.payload.member.pool_id diff --git a/etc/ceilometer/examples/osprofiler_event_definitions.yaml b/etc/ceilometer/examples/osprofiler_event_definitions.yaml deleted file mode 100644 index d2a87539..00000000 --- a/etc/ceilometer/examples/osprofiler_event_definitions.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- event_type: profiler.* - traits: - project: - fields: payload.project - service: - fields: payload.service - name: - fields: payload.name - base_id: - fields: payload.base_id - trace_id: - fields: payload.trace_id - parent_id: - fields: payload.parent_id - timestamp: - fields: payload.timestamp - host: - fields: payload.info.host - path: - fields: payload.info.request.path - query: - 
fields: payload.info.request.query - method: - fields: payload.info.request.method - scheme: - fields: payload.info.request.scheme - db.statement: - fields: payload.info.db.statement - db.params: - fields: payload.info.db.params diff --git a/etc/ceilometer/gnocchi_resources.yaml b/etc/ceilometer/gnocchi_resources.yaml deleted file mode 100644 index 88b81541..00000000 --- a/etc/ceilometer/gnocchi_resources.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- - -resources: - - resource_type: identity - archive_policy: low - metrics: - - 'identity.authenticate.success' - - 'identity.authenticate.pending' - - 'identity.authenticate.failure' - - 'identity.user.created' - - 'identity.user.deleted' - - 'identity.user.updated' - - 'identity.group.created' - - 'identity.group.deleted' - - 'identity.group.updated' - - 'identity.role.created' - - 'identity.role.deleted' - - 'identity.role.updated' - - 'identity.project.created' - - 'identity.project.deleted' - - 'identity.project.updated' - - 'identity.trust.created' - - 'identity.trust.deleted' - - 'identity.role_assignment.created' - - 'identity.role_assignment.deleted' - - - resource_type: ceph_account - metrics: - - 'radosgw.objects' - - 'radosgw.objects.size' - - 'radosgw.objects.containers' - - 'radosgw.api.request' - - 'radosgw.containers.objects' - - 'radosgw.containers.objects.size' - - - resource_type: instance - metrics: - - 'instance' - - 'memory' - - 'memory.usage' - - 'memory.resident' - - 'vcpus' - - 'cpu' - - 'cpu.delta' - - 'cpu_util' - - 'disk.root.size' - - 'disk.ephemeral.size' - - 'disk.read.requests' - - 'disk.read.requests.rate' - - 'disk.write.requests' - - 'disk.write.requests.rate' - - 'disk.read.bytes' - - 'disk.read.bytes.rate' - - 'disk.write.bytes' - - 'disk.write.bytes.rate' - - 'disk.latency' - - 'disk.iops' - - 'disk.capacity' - - 'disk.allocation' - - 'disk.usage' - attributes: - host: resource_metadata.host - image_ref: resource_metadata.image_ref - display_name: resource_metadata.display_name - flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)) - server_group: resource_metadata.user_metadata.server_group - - - resource_type: instance_network_interface - metrics: - - 'network.outgoing.packets.rate' - - 'network.incoming.packets.rate' - - 'network.outgoing.packets' - - 'network.incoming.packets' - - 'network.outgoing.bytes.rate' - - 'network.incoming.bytes.rate' - - 'network.outgoing.bytes' - - 'network.incoming.bytes' - attributes: - name: resource_metadata.vnic_name - instance_id: resource_metadata.instance_id - - - resource_type: instance_disk - metrics: - - 'disk.device.read.requests' - - 'disk.device.read.requests.rate' - - 'disk.device.write.requests' - - 'disk.device.write.requests.rate' - - 'disk.device.read.bytes' - - 'disk.device.read.bytes.rate' - - 'disk.device.write.bytes' - - 'disk.device.write.bytes.rate' - - 'disk.device.latency' - - 'disk.device.iops' - - 'disk.device.capacity' - - 'disk.device.allocation' - - 'disk.device.usage' - attributes: - name: resource_metadata.disk_name - instance_id: resource_metadata.instance_id - - - resource_type: image - metrics: - - 'image' - - 'image.size' - - 'image.download' - - 'image.serve' - attributes: - name: resource_metadata.name - container_format: resource_metadata.container_format - disk_format: resource_metadata.disk_format - - - resource_type: ipmi - metrics: - - 'hardware.ipmi.node.power' - - 'hardware.ipmi.node.temperature' - - 'hardware.ipmi.node.inlet_temperature' - - 'hardware.ipmi.node.outlet_temperature' - - 'hardware.ipmi.node.fan' - - 
'hardware.ipmi.node.current' - - 'hardware.ipmi.node.voltage' - - 'hardware.ipmi.node.airflow' - - 'hardware.ipmi.node.cups' - - 'hardware.ipmi.node.cpu_util' - - 'hardware.ipmi.node.mem_util' - - 'hardware.ipmi.node.io_util' - - - resource_type: network - metrics: - - 'bandwidth' - - 'network' - - 'network.create' - - 'network.update' - - 'subnet' - - 'subnet.create' - - 'subnet.update' - - 'port' - - 'port.create' - - 'port.update' - - 'router' - - 'router.create' - - 'router.update' - - 'ip.floating' - - 'ip.floating.create' - - 'ip.floating.update' - - - resource_type: stack - metrics: - - 'stack.create' - - 'stack.update' - - 'stack.delete' - - 'stack.resume' - - 'stack.suspend' - - - resource_type: swift_account - metrics: - - 'storage.objects.incoming.bytes' - - 'storage.objects.outgoing.bytes' - - 'storage.api.request' - - 'storage.objects.size' - - 'storage.objects' - - 'storage.objects.containers' - - 'storage.containers.objects' - - 'storage.containers.objects.size' - - - resource_type: volume - metrics: - - 'volume' - - 'volume.size' - - 'volume.create' - - 'volume.delete' - - 'volume.update' - - 'volume.resize' - - 'volume.attach' - - 'volume.detach' - attributes: - display_name: resource_metadata.display_name - - - resource_type: host - metrics: - - 'hardware.cpu.load.1min' - - 'hardware.cpu.load.5min' - - 'hardware.cpu.load.15min' - - 'hardware.cpu.util' - - 'hardware.memory.total' - - 'hardware.memory.used' - - 'hardware.memory.swap.total' - - 'hardware.memory.swap.avail' - - 'hardware.memory.buffer' - - 'hardware.memory.cached' - - 'hardware.network.ip.outgoing.datagrams' - - 'hardware.network.ip.incoming.datagrams' - - 'hardware.system_stats.cpu.idle' - - 'hardware.system_stats.io.outgoing.blocks' - - 'hardware.system_stats.io.incoming.blocks' - attributes: - host_name: resource_metadata.resource_url - - - resource_type: host_disk - metrics: - - 'hardware.disk.size.total' - - 'hardware.disk.size.used' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.device - - - resource_type: host_network_interface - metrics: - - 'hardware.network.incoming.bytes' - - 'hardware.network.outgoing.bytes' - - 'hardware.network.outgoing.errors' - attributes: - host_name: resource_metadata.resource_url - device_name: resource_metadata.name diff --git a/etc/ceilometer/pipeline.yaml b/etc/ceilometer/pipeline.yaml deleted file mode 100644 index a5bd5148..00000000 --- a/etc/ceilometer/pipeline.yaml +++ /dev/null @@ -1,92 +0,0 @@ ---- -sources: - - name: meter_source - interval: 600 - meters: - - "*" - sinks: - - meter_sink - - name: cpu_source - interval: 600 - meters: - - "cpu" - sinks: - - cpu_sink - - cpu_delta_sink - - name: disk_source - interval: 600 - meters: - - "disk.read.bytes" - - "disk.read.requests" - - "disk.write.bytes" - - "disk.write.requests" - - "disk.device.read.bytes" - - "disk.device.read.requests" - - "disk.device.write.bytes" - - "disk.device.write.requests" - sinks: - - disk_sink - - name: network_source - interval: 600 - meters: - - "network.incoming.bytes" - - "network.incoming.packets" - - "network.outgoing.bytes" - - "network.outgoing.packets" - sinks: - - network_sink -sinks: - - name: meter_sink - transformers: - publishers: - - notifier:// - - name: cpu_sink - transformers: - - name: "rate_of_change" - parameters: - target: - name: "cpu_util" - unit: "%" - type: "gauge" - scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - publishers: - - notifier:// - - name: cpu_delta_sink - transformers: - - name: "delta" - 
parameters: - target: - name: "cpu.delta" - growth_only: True - publishers: - notifier:// - - name: disk_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "(disk\\.device|disk)\\.(read|write)\\.(bytes|requests)" - unit: "(B|request)" - target: - map_to: - name: "\\1.\\2.\\3.rate" - unit: "\\1/s" - type: "gauge" - publishers: - notifier:// - - name: network_sink - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "network\\.(incoming|outgoing)\\.(bytes|packets)" - unit: "(B|packet)" - target: - map_to: - name: "network.\\1.\\2.rate" - unit: "\\1/s" - type: "gauge" - publishers: - notifier:// diff --git a/etc/ceilometer/rootwrap.conf b/etc/ceilometer/rootwrap.conf deleted file mode 100644 index f5d90d20..00000000 --- a/etc/ceilometer/rootwrap.conf +++ /dev/null @@ -1,27 +0,0 @@ -# Configuration for ceilometer-rootwrap -# This file should be owned by (and only-writeable by) the root user - -[DEFAULT] -# List of directories to load filter definitions from (separated by ','). -# These directories MUST all be only writeable by root ! -filters_path=/etc/ceilometer/rootwrap.d,/usr/share/ceilometer/rootwrap - -# List of directories to search executables in, in case filters do not -# explicitly specify a full path (separated by ',') -# If not specified, defaults to system PATH environment variable. -# These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin - -# Enable logging to syslog -# Default value is False -use_syslog=False - -# Which syslog facility to use. -# Valid values include auth, authpriv, syslog, user0, user1... -# Default value is 'syslog' -syslog_log_facility=syslog - -# Which messages to log. -# INFO means log all usage -# ERROR means only log unsuccessful attempts -syslog_log_level=ERROR diff --git a/etc/ceilometer/rootwrap.d/ipmi.filters b/etc/ceilometer/rootwrap.d/ipmi.filters deleted file mode 100644 index 2ef74b04..00000000 --- a/etc/ceilometer/rootwrap.d/ipmi.filters +++ /dev/null @@ -1,7 +0,0 @@ -# ceilometer-rootwrap command filters for IPMI capable nodes -# This file should be owned by (and only-writeable by) the root user - -[Filters] -# ceilometer/ipmi/nodemanager/node_manager.py: 'ipmitool' -ipmitool: CommandFilter, ipmitool, root - diff --git a/etc/panko/policy.json b/etc/panko/policy.json new file mode 100644 index 00000000..332d080d --- /dev/null +++ b/etc/panko/policy.json @@ -0,0 +1,7 @@ +{ + "context_is_admin": "role:admin", + "segregation": "rule:context_is_admin", + + "telemetry:events:index": "", + "telemetry:events:show": "" +} diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst deleted file mode 100644 index c9996a36..00000000 --- a/rally-jobs/README.rst +++ /dev/null @@ -1,29 +0,0 @@ -Rally job related files -======================= - -This directory contains rally tasks and plugins that are run by OpenStack CI. - -Structure ---------- - -* plugins - directory where you can add rally plugins. Almost everything in - Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic - cleanup resources, .... - -* extra - all files from this directory will be copy-pasted to gates, so you - are able to use absolute paths in rally tasks. 
- Files will be located in ~/.rally/extra/* - -* ceilometer is a task that is run in gates against Ceilometer - - -Useful links ------------- - -* More about Rally: https://rally.readthedocs.org/en/latest/ - -* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html - -* About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins diff --git a/rally-jobs/ceilometer.yaml b/rally-jobs/ceilometer.yaml deleted file mode 100644 index 32c1022f..00000000 --- a/rally-jobs/ceilometer.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- - - CeilometerMeters.list_meters: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerResource.list_resources: - - - runner: - type: "constant" - times: 10 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerStats.create_meter_and_get_stats: - - - args: - user_id: "user-id" - resource_id: "resource-id" - counter_volume: 1.0 - counter_unit: "" - counter_type: "cumulative" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - - CeilometerQueries.create_and_query_samples: - - - args: - filter: {"=": {"counter_unit": "instance"}} - orderby: !!null - limit: 10 - counter_name: "cpu_util" - counter_type: "gauge" - counter_unit: "instance" - counter_volume: "1.0" - resource_id: "resource_id" - runner: - type: "constant" - times: 20 - concurrency: 10 - context: - users: - tenants: 1 - users_per_tenant: 1 - sla: - max_failure_percent: 0 - diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst deleted file mode 100644 index aab343c5..00000000 --- a/rally-jobs/extra/README.rst +++ /dev/null @@ -1,6 +0,0 @@ -Extra files -=========== - -All files from this directory will be copy-pasted to gates, so you are able to -use absolute paths in rally tasks. Files will be in ~/.rally/extra/* - diff --git a/rally-jobs/extra/fake.img b/rally-jobs/extra/fake.img deleted file mode 100644 index e69de29b..00000000 diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst deleted file mode 100644 index 33bec0d2..00000000 --- a/rally-jobs/plugins/README.rst +++ /dev/null @@ -1,9 +0,0 @@ -Rally plugins -============= - -All *.py modules from this directory will be auto-loaded by Rally and all -plugins will be discoverable. There is no need for any extra configuration -and there is no difference between writing them here and in rally code base. - -Note that it is better to push all interesting and useful benchmarks to the Rally -code base, as this simplifies administration for operators. diff --git a/rally-jobs/plugins/plugin_sample.py b/rally-jobs/plugins/plugin_sample.py deleted file mode 100644 index 6541357a..00000000 --- a/rally-jobs/plugins/plugin_sample.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -""" Sample of plugin for Ceilometer. - -For more Ceilometer related benchmarks take a look here: -github.com/openstack/rally/blob/master/rally/benchmark/scenarios/ceilometer/ - -About plugins: https://rally.readthedocs.org/en/latest/plugins.html - -Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts -""" - -from rally.benchmark.scenarios import base - - -class CeilometerPlugin(base.Scenario): - pass diff --git a/releasenotes/notes/.placeholder b/releasenotes/notes/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml b/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml deleted file mode 100644 index 32b4b248..00000000 --- a/releasenotes/notes/aggregator-transformer-timeout-e0f42b6c96aa7ada.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1531626 `_] - Ensure aggregator transformer timeout is honoured if size is not provided. diff --git a/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml b/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml deleted file mode 100644 index 68fd2370..00000000 --- a/releasenotes/notes/always-requeue-7a2df9243987ab67.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -critical: - - > - The previous configuration options default for - `requeue_sample_on_dispatcher_error' and - `requeue_event_on_dispatcher_error' allowed to lose data very easily: if - the dispatcher failed to send data to the backend (e.g. Gnocchi is down), - then the dispatcher raised and the data were lost forever. This was - completely unacceptable, and nobody should be able to configure Ceilometer - in that way." - -upgrade: - - > - The options `requeue_event_on_dispatcher_error' and - `requeue_sample_on_dispatcher_error' have been enabled and removed. diff --git a/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml b/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml deleted file mode 100644 index e691bec1..00000000 --- a/releasenotes/notes/batch-messaging-d126cc525879d58e.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -features: - - > - Add support for batch processing of messages from queue. This will allow - the collector and notification agent to grab multiple messages per thread - to enable more efficient processing. -upgrade: - - > - batch_size and batch_timeout configuration options are added to both - [notification] and [collector] sections of configuration. The batch_size - controls the number of messages to grab before processing. Similarly, - the batch_timeout defines the wait time before processing. diff --git a/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml b/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml deleted file mode 100644 index 39491021..00000000 --- a/releasenotes/notes/cache-json-parsers-888307f3b6b498a2.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1550436 `_] - Cache json parsers when building parsing logic to handle event and - meter definitions. This will improve agent startup and setup time. 
diff --git a/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml b/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml deleted file mode 100644 index ff9ae9f5..00000000 --- a/releasenotes/notes/compute-discovery-interval-d19f7c9036a8c186.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - To minimise load on the Nova API, an additional configuration option was added - to control the discovery interval separately from the metric polling interval. If the - resource_update_interval option is configured in the compute section, the - compute agent will discover new instances based on the defined interval. The - agent will continue to poll the discovered instances at the interval - defined by the pipeline. diff --git a/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml b/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml deleted file mode 100644 index 6ab41f75..00000000 --- a/releasenotes/notes/configurable-data-collector-e247aadbffb85243.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - > - [`bug 1480333 `_] - Support the ability to configure the collector to capture events or meters mutually - exclusively, rather than always capturing both. -other: - - > - Configure individual dispatchers by specifying meter_dispatchers and - event_dispatchers in the configuration file. diff --git a/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml b/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml deleted file mode 100644 index c9fbe533..00000000 --- a/releasenotes/notes/cors-support-70c33ba1f6825a7b.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - Support for CORS is added. More information can be found - [`here `_] -upgrade: - - > - The api-paste.ini file can be modified to include or exclude the CORS - middleware. Additional configuration can be applied to the middleware as well. diff --git a/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml b/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml deleted file mode 100644 index a4b0c6ef..00000000 --- a/releasenotes/notes/event-type-race-c295baf7f1661eab.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1254800 `_] - Add better support to catch race conditions when creating event_types diff --git a/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml b/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml deleted file mode 100644 index 45794a74..00000000 --- a/releasenotes/notes/fix-agent-coordination-a7103a78fecaec24.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -critical: - - > - [`bug 1533787 `_] - Fix an issue where agents are not properly getting registered to the group - when multiple notification agents are deployed. This can result in - bad transformation as the agents are not coordinated. It is still - recommended to set heartbeat_timeout_threshold = 0 in the - [oslo_messaging_rabbit] section when deploying multiple agents. diff --git a/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml b/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml deleted file mode 100644 index 60c598b3..00000000 --- a/releasenotes/notes/fix-aggregation-transformer-9472aea189fa8f65.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - > - [`bug 1539163 `_] - Add the ability to define whether to use the first or last timestamp when - aggregating samples. This will allow more flexibility when chaining - transformers. 
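The configurable-data-collector note above is, mechanically, just two oslo.config options. A hedged sketch of how such options are typically declared; the option names come from the note, while the defaults and help strings here are assumptions rather than the exact Ceilometer code::

    from oslo_config import cfg

    # Hypothetical reconstruction of the options named in the note above;
    # defaults and help text are assumptions.
    OPTS = [
        cfg.MultiStrOpt('meter_dispatchers',
                        default=['database'],
                        help='Dispatchers to process metering data.'),
        cfg.MultiStrOpt('event_dispatchers',
                        default=['database'],
                        help='Dispatchers to process event data.'),
    ]


    def register_opts(conf):
        conf.register_opts(OPTS)

With the two lists split, an operator can send meters to one backend and events to another, or leave one list empty to capture only one kind of data.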
diff --git a/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml b/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml deleted file mode 100644 index 1bd295ab..00000000 --- a/releasenotes/notes/fix-floatingip-pollster-f5172060c626b19e.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -fixes: - - > - [`bug 1536338 `_] - A patch was added to fix the broken floatingip pollster, - which polled data from the Nova API; since the Nova API - filtered the data by tenant, Ceilometer was not getting - any data back. The fix changes the pollster to use the - Neutron API instead to get the floating IP info. diff --git a/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml b/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml deleted file mode 100644 index d3eb8399..00000000 --- a/releasenotes/notes/fix-network-lb-bytes-sample-5dec2c6f3a8ae174.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1530793 `_] - network.services.lb.incoming.bytes meter was previously set to an incorrect - type. It should be a gauge meter. diff --git a/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml b/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml deleted file mode 100644 index 653d3b32..00000000 --- a/releasenotes/notes/gnocchi-cache-1d8025dfc954f281.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -features: - - > - Support resource caching in the Gnocchi dispatcher to improve write - performance by avoiding additional queries. -other: - - > - A dogpile.cache-supported backend is required to enable the cache. Additional - configuration `options `_ - are also required. diff --git a/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml b/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml deleted file mode 100644 index 29f4b04e..00000000 --- a/releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 255569 `_] - Fix caching support in the Gnocchi dispatcher. Added better locking support - to enable smoother cache access. diff --git a/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml b/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml deleted file mode 100644 index 01774a90..00000000 --- a/releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - > - The Gnocchi dispatcher now uses the client library rather than direct HTTP requests -upgrade: - - > - The gnocchiclient library is now a requirement if using ceilometer+gnocchi. diff --git a/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml b/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml deleted file mode 100644 index baf5db49..00000000 --- a/releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -features: - - > - [`bug 1518338 `_] - Add support for storing SNMP metrics in Gnocchi. This functionality requires - Gnocchi v2.1.0 to be installed. diff --git a/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml b/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml deleted file mode 100644 index 23f557e5..00000000 --- a/releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -upgrade: - - > - gnocchi_resources.yaml in Ceilometer should be updated. 
-fixes: - - > - Fix samples from Heat to map to the correct Gnocchi resource type diff --git a/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml b/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml deleted file mode 100644 index 485204b2..00000000 --- a/releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1523124 `_] - Fix the Gnocchi dispatcher to support the UDP collector diff --git a/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml b/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml deleted file mode 100644 index 4149a0b0..00000000 --- a/releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - > - [`bug 1542189 `_] - Handle malformed resource definitions in gnocchi_resources.yaml - gracefully. Previously, an exception was raised once a bad - resource was hit and the rest were skipped. The patch instead skips the bad - resource and proceeds with the rest of the definitions. diff --git a/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml b/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml deleted file mode 100644 index c6eb6e77..00000000 --- a/releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- -upgrade: - - > - To utilize the new policy support, the policy.json file - should be updated accordingly. The pre-existing policy.json - file will continue to function as it does if policy changes - are not required. -fixes: - - > - [`bug 1504495 `_] - Configure Ceilometer to handle policy.json rules when possible. diff --git a/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml b/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml deleted file mode 100644 index c3fcf6c8..00000000 --- a/releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -upgrade: - - > - Run db-sync to add new indices. -fixes: - - > - [`bug 1526793 `_] - Additional indices were added to better support querying of event data. diff --git a/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml b/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml deleted file mode 100644 index 87225fad..00000000 --- a/releasenotes/notes/keystone-v3-fab1e257c5672965.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Add support for Keystone v3 authentication diff --git a/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml b/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml deleted file mode 100644 index 9bb5c5b1..00000000 --- a/releasenotes/notes/lookup-meter-def-vol-correctly-0122ae429275f2a6.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -fixes: - - > - [`bug 1536699 `_] - Patch to fix the volume field lookup in the meter definition file. Previously, if - the field was missing from a definition, a KeyError was raised and - processing aborted; now the meter with the missing field is skipped and processing continues - with the rest of the definitions. diff --git a/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml b/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml deleted file mode 100644 index c2a86272..00000000 --- a/releasenotes/notes/mongodb-handle-large-numbers-7c235598ca700f2d.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -fixes: - - > - [`bug 1532661 `_] - Fix statistics query failures due to large numbers stored in MongoDB. 
Data - from MongoDB is returned as Int64 for big numbers when int and float types - are expected. The data is cast to the appropriate type to handle large values. diff --git a/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml b/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml deleted file mode 100644 index 42955cdd..00000000 --- a/releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - > - Ceilometer alarms code is now fully removed from the code base. - Equivalent functionality is handled by Aodh. diff --git a/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml b/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml deleted file mode 100644 index 0c2e1fc9..00000000 --- a/releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - Support for CADF-only payload in the HTTP dispatcher is dropped, as the - audit middleware in pyCADF was dropped in the Kilo cycle. -upgrade: - - > - The audit middleware in the keystonemiddleware library should be used for - similar support. diff --git a/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml b/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml deleted file mode 100644 index 2b565152..00000000 --- a/releasenotes/notes/remove-eventlet-6738321434b60c78.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -features: - - > - Remove eventlet from Ceilometer in favour of a threaded approach diff --git a/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml b/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml deleted file mode 100644 index 669a85fa..00000000 --- a/releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -features: - - > - RPC collector support is dropped. The queue-based notifier publisher and - collector were added as the recommended alternative as of the Icehouse cycle. -upgrade: - - > - Pipeline.yaml files for agents should be updated to notifier:// or udp:// - publishers. The rpc:// publisher is no longer supported. diff --git a/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml b/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml deleted file mode 100644 index 0a1ec7e5..00000000 --- a/releasenotes/notes/skip-duplicate-meter-def-0420164f6a95c50c.yaml +++ /dev/null @@ -1,10 +0,0 @@ - - ---- -fixes: - - > - [`bug 1536498 `_] - Patch to fix duplicate meter definitions causing duplicate samples. - If a duplicate is found, log a warning and skip the meter definition. - Note that the first occurrence of a meter will be used and any following - duplicates will be skipped. diff --git a/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml b/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml deleted file mode 100644 index 39482a91..00000000 --- a/releasenotes/notes/sql-query-optimisation-ebb2233f7a9b5d06.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -fixes: - - > - [`bug 1506738 `_] - [`bug 1509677 `_] - Optimise SQL backend queries to minimise query load diff --git a/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml b/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml deleted file mode 100644 index 248e3582..00000000 --- a/releasenotes/notes/support-None-query-45abaae45f08eda4.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -fixes: - - > - [`bug 1388680 `_] - Support the ability to query for a None value when using the SQL backend. 
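The support-None-query fix above comes down to a well-known SQL detail: '= NULL' never matches, so a None value has to be translated into an IS NULL test, which SQLAlchemy exposes as column.is_(None). A minimal sketch of the idea with an illustrative helper (the function name is hypothetical, not Ceilometer's actual code)::

    def apply_equality_filter(query, column, value):
        # SQL has no '= NULL'; render a None filter value as IS NULL,
        # and everything else as an ordinary equality comparison.
        if value is None:
            return query.filter(column.is_(None))
        return query.filter(column == value)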
diff --git a/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml b/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml deleted file mode 100644 index 7c652ab9..00000000 --- a/releasenotes/notes/support-lbaasv2-polling-c830dd49bcf25f64.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -features: - - > - Support for polling Neutron's LBaaS v2 API was added, as the v1 API in Neutron - is deprecated. The same metrics are available between v1 and v2. -issues: - - > - Neutron API is not designed to be polled against. When polling against - Neutron is enabled, Ceilometer's polling agents may generate a significant - load against the Neutron API. It is recommended that a dedicated API be - enabled for polling while Neutron's API is improved to handle polling. -upgrade: - - > - By default, Ceilometer will poll the v2 API. To poll the legacy v1 API, - add the neutron_lbaas_version=v1 option to the configuration file. diff --git a/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml b/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml deleted file mode 100644 index fb970fd0..00000000 --- a/releasenotes/notes/support-snmp-cpu-util-5c1c7afb713c1acd.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- -features: - - > - [`bug 1513731 `_] - Add support for hardware cpu_util in snmp.yaml diff --git a/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml b/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml deleted file mode 100644 index 46fdf04f..00000000 --- a/releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -features: - - > - [`bug 1506959 `_] - Add support to query the unique set of meter names rather than the meters - associated with each resource. The list is available by adding the unique=True - option to the request. diff --git a/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml b/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml deleted file mode 100644 index fbb6414c..00000000 --- a/releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -critical: - - > - [`bug 1519767 `_] - fnmatch functionality in Python <= 2.7.9 is not threadsafe. This issue and - its potential race conditions are now patched. diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder deleted file mode 100644 index e69de29b..00000000 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py deleted file mode 100644 index fa6da509..00000000 --- a/releasenotes/source/conf.py +++ /dev/null @@ -1,275 +0,0 @@ -# -*- coding: utf-8 -*- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Ceilometer Release Notes documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 3 17:40:50 2015. -# -# This file is execfile()d with the current directory set to its -# containing dir. 
-# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'oslosphinx', - 'reno.sphinxext', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Ceilometer Release Notes' -copyright = u'2015, Ceilometer Developers' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -from ceilometer.version import version_info as ceilometer_version -# The full version, including alpha/beta/rc tags. -release = ceilometer_version.version_string_with_vcs() -# The short X.Y version. -version = ceilometer_version.canonical_version_string() - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. 
-# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'CeilometerReleaseNotesdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # 'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'CeilometerReleaseNotes.tex', - u'Ceilometer Release Notes Documentation', - u'Ceilometer Developers', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. 
-# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'ceilometerreleasenotes', - u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'CeilometerReleaseNotes', - u'Ceilometer Release Notes Documentation', - u'Ceilometer Developers', 'CeilometerReleaseNotes', - 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst deleted file mode 100644 index 49a83ead..00000000 --- a/releasenotes/source/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -========================= - Ceilometer Release Notes -========================= - -.. toctree:: - :maxdepth: 1 - - mitaka - liberty - unreleased diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst deleted file mode 100644 index 36217be8..00000000 --- a/releasenotes/source/liberty.rst +++ /dev/null @@ -1,6 +0,0 @@ -============================== - Liberty Series Release Notes -============================== - -.. release-notes:: - :branch: origin/stable/liberty diff --git a/releasenotes/source/mitaka.rst b/releasenotes/source/mitaka.rst deleted file mode 100644 index e5456096..00000000 --- a/releasenotes/source/mitaka.rst +++ /dev/null @@ -1,6 +0,0 @@ -=================================== - Mitaka Series Release Notes -=================================== - -.. release-notes:: - :branch: origin/stable/mitaka diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst deleted file mode 100644 index cd22aabc..00000000 --- a/releasenotes/source/unreleased.rst +++ /dev/null @@ -1,5 +0,0 @@ -============================== - Current Series Release Notes -============================== - -.. release-notes:: diff --git a/requirements.txt b/requirements.txt index 2a8f7e4f..cce51a04 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,46 +2,25 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
-futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD -futurist>=0.11.0 # Apache-2.0 -debtcollector>=1.2.0 # Apache-2.0 +debtcollector>=1.2.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 -jsonpath-rw-ext>=0.1.9 # Apache-2.0 -jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT -kafka-python<1.0.0,>=0.9.5 # Apache-2.0 keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0 lxml>=2.3 # BSD -msgpack-python>=0.4.0 # Apache-2.0 oslo.db>=4.1.0 # Apache-2.0 -oslo.concurrency>=3.5.0 # Apache-2.0 oslo.config>=3.9.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 -oslo.service>=1.0.0 # Apache-2.0 PasteDeploy>=1.5.0 # MIT pbr>=1.6 # Apache-2.0 pecan>=1.0.0 # BSD -oslo.messaging>=5.2.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 -oslo.serialization>=1.10.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 -pysnmp<5.0.0,>=4.2.3 # BSD -python-glanceclient>=2.0.0 # Apache-2.0 -python-keystoneclient!=1.8.0,!=2.1.0,>=1.6.0 # Apache-2.0 -keystoneauth1>=2.1.0 # Apache-2.0 -python-neutronclient>=4.2.0 # Apache-2.0 -python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 -python-swiftclient>=2.2.0 # Apache-2.0 PyYAML>=3.1.0 # MIT -requests!=2.9.0,>=2.8.1 # Apache-2.0 six>=1.9.0 # MIT SQLAlchemy<1.1.0,>=1.0.10 # MIT -sqlalchemy-migrate>=0.9.6 # Apache-2.0 stevedore>=1.9.0 # Apache-2.0 -tooz>=1.28.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3 # MIT WSME>=0.8 # MIT diff --git a/setup.cfg b/setup.cfg index 81f4ceb9..29cfc6fc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,163 +26,6 @@ packages = ceilometer [entry_points] -ceilometer.notification = - instance = ceilometer.compute.notifications.instance:Instance - instance_scheduled = ceilometer.compute.notifications.instance:InstanceScheduled - network = ceilometer.network.notifications:Network - subnet = ceilometer.network.notifications:Subnet - port = ceilometer.network.notifications:Port - router = ceilometer.network.notifications:Router - floatingip = ceilometer.network.notifications:FloatingIP - http.request = ceilometer.middleware:HTTPRequest - http.response = ceilometer.middleware:HTTPResponse - hardware.ipmi.temperature = ceilometer.ipmi.notifications.ironic:TemperatureSensorNotification - hardware.ipmi.voltage = ceilometer.ipmi.notifications.ironic:VoltageSensorNotification - hardware.ipmi.current = ceilometer.ipmi.notifications.ironic:CurrentSensorNotification - hardware.ipmi.fan = ceilometer.ipmi.notifications.ironic:FanSensorNotification - network.services.lb.pool = ceilometer.network.notifications:Pool - network.services.lb.vip = ceilometer.network.notifications:Vip - network.services.lb.member = ceilometer.network.notifications:Member - network.services.lb.health_monitor = ceilometer.network.notifications:HealthMonitor - network.services.firewall = ceilometer.network.notifications:Firewall - network.services.firewall.policy = ceilometer.network.notifications:FirewallPolicy - network.services.firewall.rule = ceilometer.network.notifications:FirewallRule - network.services.vpn = ceilometer.network.notifications:VPNService - network.services.vpn.ipsecpolicy = ceilometer.network.notifications:IPSecPolicy - network.services.vpn.ikepolicy = ceilometer.network.notifications:IKEPolicy - network.services.vpn.connections = ceilometer.network.notifications:IPSecSiteConnection - _sample = ceilometer.telemetry.notifications:TelemetryIpc - meter = ceilometer.meter.notifications:ProcessMeterNotifications - -ceilometer.discover = - local_instances = 
ceilometer.compute.discovery:InstanceDiscovery
-    endpoint = ceilometer.agent.discovery.endpoint:EndpointDiscovery
-    tenant = ceilometer.agent.discovery.tenant:TenantDiscovery
-    local_node = ceilometer.agent.discovery.localnode:LocalNodeDiscovery
-    lb_pools = ceilometer.network.services.discovery:LBPoolsDiscovery
-    lb_vips = ceilometer.network.services.discovery:LBVipsDiscovery
-    lb_members = ceilometer.network.services.discovery:LBMembersDiscovery
-    lb_listeners = ceilometer.network.services.discovery:LBListenersDiscovery
-    lb_loadbalancers = ceilometer.network.services.discovery:LBLoadBalancersDiscovery
-    lb_health_probes = ceilometer.network.services.discovery:LBHealthMonitorsDiscovery
-    vpn_services = ceilometer.network.services.discovery:VPNServicesDiscovery
-    ipsec_connections = ceilometer.network.services.discovery:IPSecConnectionsDiscovery
-    fw_services = ceilometer.network.services.discovery:FirewallDiscovery
-    fw_policy = ceilometer.network.services.discovery:FirewallPolicyDiscovery
-    tripleo_overcloud_nodes = ceilometer.hardware.discovery:NodesDiscoveryTripleO
-    fip_services = ceilometer.network.services.discovery:FloatingIPDiscovery
-
-ceilometer.poll.compute =
-    disk.read.requests = ceilometer.compute.pollsters.disk:ReadRequestsPollster
-    disk.write.requests = ceilometer.compute.pollsters.disk:WriteRequestsPollster
-    disk.read.bytes = ceilometer.compute.pollsters.disk:ReadBytesPollster
-    disk.write.bytes = ceilometer.compute.pollsters.disk:WriteBytesPollster
-    disk.read.requests.rate = ceilometer.compute.pollsters.disk:ReadRequestsRatePollster
-    disk.write.requests.rate = ceilometer.compute.pollsters.disk:WriteRequestsRatePollster
-    disk.read.bytes.rate = ceilometer.compute.pollsters.disk:ReadBytesRatePollster
-    disk.write.bytes.rate = ceilometer.compute.pollsters.disk:WriteBytesRatePollster
-    disk.device.read.requests = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsPollster
-    disk.device.write.requests = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsPollster
-    disk.device.read.bytes = ceilometer.compute.pollsters.disk:PerDeviceReadBytesPollster
-    disk.device.write.bytes = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesPollster
-    disk.device.read.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceReadRequestsRatePollster
-    disk.device.write.requests.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteRequestsRatePollster
-    disk.device.read.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceReadBytesRatePollster
-    disk.device.write.bytes.rate = ceilometer.compute.pollsters.disk:PerDeviceWriteBytesRatePollster
-    disk.latency = ceilometer.compute.pollsters.disk:DiskLatencyPollster
-    disk.device.latency = ceilometer.compute.pollsters.disk:PerDeviceDiskLatencyPollster
-    disk.iops = ceilometer.compute.pollsters.disk:DiskIOPSPollster
-    disk.device.iops = ceilometer.compute.pollsters.disk:PerDeviceDiskIOPSPollster
-    cpu = ceilometer.compute.pollsters.cpu:CPUPollster
-    cpu_util = ceilometer.compute.pollsters.cpu:CPUUtilPollster
-    network.incoming.bytes = ceilometer.compute.pollsters.net:IncomingBytesPollster
-    network.incoming.packets = ceilometer.compute.pollsters.net:IncomingPacketsPollster
-    network.outgoing.bytes = ceilometer.compute.pollsters.net:OutgoingBytesPollster
-    network.outgoing.packets = ceilometer.compute.pollsters.net:OutgoingPacketsPollster
-    network.incoming.bytes.rate = ceilometer.compute.pollsters.net:IncomingBytesRatePollster
-    network.outgoing.bytes.rate = ceilometer.compute.pollsters.net:OutgoingBytesRatePollster
-    instance = ceilometer.compute.pollsters.instance:InstancePollster
-    memory.usage = ceilometer.compute.pollsters.memory:MemoryUsagePollster
-    memory.resident = ceilometer.compute.pollsters.memory:MemoryResidentPollster
-    disk.capacity = ceilometer.compute.pollsters.disk:CapacityPollster
-    disk.allocation = ceilometer.compute.pollsters.disk:AllocationPollster
-    disk.usage = ceilometer.compute.pollsters.disk:PhysicalPollster
-    disk.device.capacity = ceilometer.compute.pollsters.disk:PerDeviceCapacityPollster
-    disk.device.allocation = ceilometer.compute.pollsters.disk:PerDeviceAllocationPollster
-    disk.device.usage = ceilometer.compute.pollsters.disk:PerDevicePhysicalPollster
-
-ceilometer.poll.ipmi =
-    hardware.ipmi.node.power = ceilometer.ipmi.pollsters.node:PowerPollster
-    hardware.ipmi.node.temperature = ceilometer.ipmi.pollsters.node:InletTemperaturePollster
-    hardware.ipmi.node.outlet_temperature = ceilometer.ipmi.pollsters.node:OutletTemperaturePollster
-    hardware.ipmi.node.airflow = ceilometer.ipmi.pollsters.node:AirflowPollster
-    hardware.ipmi.node.cups = ceilometer.ipmi.pollsters.node:CUPSIndexPollster
-    hardware.ipmi.node.cpu_util = ceilometer.ipmi.pollsters.node:CPUUtilPollster
-    hardware.ipmi.node.mem_util = ceilometer.ipmi.pollsters.node:MemUtilPollster
-    hardware.ipmi.node.io_util = ceilometer.ipmi.pollsters.node:IOUtilPollster
-    hardware.ipmi.temperature = ceilometer.ipmi.pollsters.sensor:TemperatureSensorPollster
-    hardware.ipmi.voltage = ceilometer.ipmi.pollsters.sensor:VoltageSensorPollster
-    hardware.ipmi.current = ceilometer.ipmi.pollsters.sensor:CurrentSensorPollster
-    hardware.ipmi.fan = ceilometer.ipmi.pollsters.sensor:FanSensorPollster
-
-ceilometer.poll.central =
-    ip.floating = ceilometer.network.floatingip:FloatingIPPollster
-    image = ceilometer.image.glance:ImagePollster
-    image.size = ceilometer.image.glance:ImageSizePollster
-    rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster
-    rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster
-    rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster
-    rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster
-    rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster
-    rgw.usage = ceilometer.objectstore.rgw:UsagePollster
-    storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster
-    storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster
-    storage.objects = ceilometer.objectstore.swift:ObjectsPollster
-    storage.objects.size = ceilometer.objectstore.swift:ObjectsSizePollster
-    storage.objects.containers = ceilometer.objectstore.swift:ObjectsContainersPollster
-    energy = ceilometer.energy.kwapi:EnergyPollster
-    power = ceilometer.energy.kwapi:PowerPollster
-    switch.port = ceilometer.network.statistics.port:PortPollster
-    switch.port.receive.packets = ceilometer.network.statistics.port:PortPollsterReceivePackets
-    switch.port.transmit.packets = ceilometer.network.statistics.port:PortPollsterTransmitPackets
-    switch.port.receive.bytes = ceilometer.network.statistics.port:PortPollsterReceiveBytes
-    switch.port.transmit.bytes = ceilometer.network.statistics.port:PortPollsterTransmitBytes
-    switch.port.receive.drops = ceilometer.network.statistics.port:PortPollsterReceiveDrops
-    switch.port.transmit.drops = ceilometer.network.statistics.port:PortPollsterTransmitDrops
-    switch.port.receive.errors = ceilometer.network.statistics.port:PortPollsterReceiveErrors
-    switch.port.transmit.errors = ceilometer.network.statistics.port:PortPollsterTransmitErrors
-    switch.port.receive.frame_error = ceilometer.network.statistics.port:PortPollsterReceiveFrameErrors
-    switch.port.receive.overrun_error = ceilometer.network.statistics.port:PortPollsterReceiveOverrunErrors
-    switch.port.receive.crc_error = ceilometer.network.statistics.port:PortPollsterReceiveCRCErrors
-    switch.port.collision.count = ceilometer.network.statistics.port:PortPollsterCollisionCount
-    switch.table = ceilometer.network.statistics.table:TablePollster
-    switch.table.active.entries = ceilometer.network.statistics.table:TablePollsterActiveEntries
-    switch.table.lookup.packets = ceilometer.network.statistics.table:TablePollsterLookupPackets
-    switch.table.matched.packets = ceilometer.network.statistics.table:TablePollsterMatchedPackets
-    switch = ceilometer.network.statistics.switch:SWPollster
-    switch.flow = ceilometer.network.statistics.flow:FlowPollster
-    switch.flow.bytes = ceilometer.network.statistics.flow:FlowPollsterBytes
-    switch.flow.duration.nanoseconds = ceilometer.network.statistics.flow:FlowPollsterDurationNanoseconds
-    switch.flow.duration.seconds = ceilometer.network.statistics.flow:FlowPollsterDurationSeconds
-    switch.flow.packets = ceilometer.network.statistics.flow:FlowPollsterPackets
-    network.services.lb.pool = ceilometer.network.services.lbaas:LBPoolPollster
-    network.services.lb.vip = ceilometer.network.services.lbaas:LBVipPollster
-    network.services.lb.member = ceilometer.network.services.lbaas:LBMemberPollster
-    network.services.lb.listener = ceilometer.network.services.lbaas:LBListenerPollster
-    network.services.lb.loadbalancer = ceilometer.network.services.lbaas:LBLoadBalancerPollster
-    network.services.lb.health_monitor = ceilometer.network.services.lbaas:LBHealthMonitorPollster
-    network.services.lb.total.connections = ceilometer.network.services.lbaas:LBTotalConnectionsPollster
-    network.services.lb.active.connections = ceilometer.network.services.lbaas:LBActiveConnectionsPollster
-    network.services.lb.incoming.bytes = ceilometer.network.services.lbaas:LBBytesInPollster
-    network.services.lb.outgoing.bytes = ceilometer.network.services.lbaas:LBBytesOutPollster
-    network.services.vpn = ceilometer.network.services.vpnaas:VPNServicesPollster
-    network.services.vpn.connections = ceilometer.network.services.vpnaas:IPSecConnectionsPollster
-    network.services.firewall = ceilometer.network.services.fwaas:FirewallPollster
-    network.services.firewall.policy = ceilometer.network.services.fwaas:FirewallPolicyPollster
-
-ceilometer.builder.poll.central =
-    hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
-
 ceilometer.event.storage =
     es = ceilometer.event.storage.impl_elasticsearch:Connection
     log = ceilometer.event.storage.impl_log:Connection
@@ -192,77 +35,13 @@ ceilometer.event.storage =
     sqlite = ceilometer.event.storage.impl_sqlalchemy:Connection
     hbase = ceilometer.event.storage.impl_hbase:Connection

-ceilometer.metering.storage =
-    log = ceilometer.storage.impl_log:Connection
-    mongodb = ceilometer.storage.impl_mongodb:Connection
-    mysql = ceilometer.storage.impl_sqlalchemy:Connection
-    postgresql = ceilometer.storage.impl_sqlalchemy:Connection
-    sqlite = ceilometer.storage.impl_sqlalchemy:Connection
-    hbase = ceilometer.storage.impl_hbase:Connection
-
-ceilometer.compute.virt =
-    libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
-    hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
-    vsphere = ceilometer.compute.virt.vmware.inspector:VsphereInspector
-    xenapi = ceilometer.compute.virt.xenapi.inspector:XenapiInspector
-
-ceilometer.hardware.inspectors =
-    snmp = ceilometer.hardware.inspector.snmp:SNMPInspector
-
-ceilometer.transformer =
-    accumulator = ceilometer.transformer.accumulator:TransformerAccumulator
-    delta = ceilometer.transformer.conversions:DeltaTransformer
-    unit_conversion = ceilometer.transformer.conversions:ScalingTransformer
-    rate_of_change = ceilometer.transformer.conversions:RateOfChangeTransformer
-    aggregator = ceilometer.transformer.conversions:AggregatorTransformer
-    arithmetic = ceilometer.transformer.arithmetic:ArithmeticTransformer
-
-ceilometer.publisher =
-    test = ceilometer.publisher.test:TestPublisher
-    notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
-    udp = ceilometer.publisher.udp:UDPPublisher
-    file = ceilometer.publisher.file:FilePublisher
-    direct = ceilometer.publisher.direct:DirectPublisher
-    kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
-    http = ceilometer.publisher.http:HttpPublisher
-
-ceilometer.event.publisher =
-    test = ceilometer.publisher.test:TestPublisher
-    direct = ceilometer.publisher.direct:DirectPublisher
-    notifier = ceilometer.publisher.messaging:EventNotifierPublisher
-    kafka = ceilometer.publisher.kafka_broker:KafkaBrokerPublisher
-    http = ceilometer.publisher.http:HttpPublisher
-
-ceilometer.event.trait_plugin =
-    split = ceilometer.event.trait_plugins:SplitterTraitPlugin
-    bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
-    timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin
-
-
 console_scripts =
     ceilometer-api = ceilometer.cmd.api:main
-    ceilometer-polling = ceilometer.cmd.polling:main
-    ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
-    ceilometer-send-sample = ceilometer.cmd.sample:send_sample
     ceilometer-dbsync = ceilometer.cmd.storage:dbsync
     ceilometer-expirer = ceilometer.cmd.storage:expirer
-    ceilometer-rootwrap = oslo_rootwrap.cmd:main
-    ceilometer-collector = ceilometer.cmd.collector:main
-
-ceilometer.dispatcher.meter =
-    database = ceilometer.dispatcher.database:DatabaseDispatcher
-    file = ceilometer.dispatcher.file:FileDispatcher
-    http = ceilometer.dispatcher.http:HttpDispatcher
-    gnocchi = ceilometer.dispatcher.gnocchi:GnocchiDispatcher

 ceilometer.dispatcher.event =
     database = ceilometer.dispatcher.database:DatabaseDispatcher
-    file = ceilometer.dispatcher.file:FileDispatcher
-    http = ceilometer.dispatcher.http:HttpDispatcher
-
-network.statistics.drivers =
-    opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver
-    opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver

 oslo.config.opts =
     ceilometer = ceilometer.opts:list_opts
@@ -270,9 +49,6 @@ oslo.config.opts =

 oslo.config.opts.defaults =
     ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults

-tempest.test_plugins =
-    ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin
-
 [build_sphinx]
 all_files = 1
 build-dir = doc/build
diff --git a/test-requirements.txt b/test-requirements.txt
index 9aa5d54c..96d060db 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,32 +2,24 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-contextlib2>=0.4.0 # PSF License
 coverage>=3.6 # Apache-2.0
 elasticsearch<2.0,>=1.3.0 # Apache-2.0
 fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
 happybase!=0.7,>=0.5;python_version=='2.7' # MIT
 mock>=1.2 # BSD
 PyMySQL>=0.6.2 # MIT License
-os-win>=0.2.3 # Apache-2.0
-oslo.cache>=1.5.0 # Apache-2.0
 # Docs Requirements
 oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
-reno>=1.6.2 # Apache2
 oslotest>=1.10.0 # Apache-2.0
-oslo.vmware>=1.16.0 # Apache-2.0
 psycopg2>=2.5 # LGPL/ZPL
 pymongo!=3.1,>=3.0.2 # Apache-2.0
-gnocchiclient>=2.2.0 # Apache-2.0
 python-subunit>=0.0.18 # Apache-2.0/BSD
 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
 sphinxcontrib-httpdomain # BSD
 sphinxcontrib-pecanwsme>=0.8 # Apache-2.0
 testrepository>=0.0.18 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=1.4.0 # MIT
 gabbi>=1.11.0 # Apache-2.0
-requests-aws>=0.1.4 # BSD License (3 clause)
 os-testr>=0.4.1 # Apache-2.0
 WebTest>=2.0 # MIT
 pifpaf>=0.0.11
diff --git a/tools/ceilometer-test-event.py b/tools/ceilometer-test-event.py
deleted file mode 100755
index dedc7ac0..00000000
--- a/tools/ceilometer-test-event.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2013 Rackspace Hosting.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command line tool help you debug your event definitions.
-
-Feed it a list of test notifications in json format, and it will show
-you what events will be generated.
-"""
-
-import json
-import sys
-
-from oslo_config import cfg
-from stevedore import extension
-
-from ceilometer.event import converter
-from ceilometer import service
-
-
-cfg.CONF.register_cli_opts([
-    cfg.StrOpt('input-file',
-               short='i',
-               help='File to read test notifications from.'
-                    ' (Containing a json list of notifications.)'
-                    ' defaults to stdin.'),
-    cfg.StrOpt('output-file',
-               short='o',
-               help='File to write results to. Defaults to stdout.'),
-])
-
-TYPES = {1: 'text',
-         2: 'int',
-         3: 'float',
-         4: 'datetime'}
-
-
-service.prepare_service()
-
-output_file = cfg.CONF.output_file
-input_file = cfg.CONF.input_file
-
-if output_file is None:
-    out = sys.stdout
-else:
-    out = open(output_file, 'w')
-
-if input_file is None:
-    notifications = json.load(sys.stdin)
-else:
-    with open(input_file, 'r') as f:
-        notifications = json.load(f)
-
-out.write("Definitions file: %s\n" % cfg.CONF.event.definitions_cfg_file)
-out.write("Notifications tested: %s\n" % len(notifications))
-
-event_converter = converter.setup_events(
-    extension.ExtensionManager(
-        namespace='ceilometer.event.trait_plugin'))
-
-for notification in notifications:
-    event = event_converter.to_event(notification)
-    if event is None:
-        out.write("Dropped notification: %s\n" %
-                  notification['message_id'])
-        continue
-    out.write("Event: %s at %s\n" % (event.event_type, event.generated))
-    for trait in event.traits:
-        dtype = TYPES[trait.dtype]
-        out.write(" Trait: name: %s, type: %s, value: %s\n" % (
-            trait.name, dtype, trait.value))
diff --git a/tools/make_test_data.py b/tools/make_test_data.py
deleted file mode 100755
index b58a60ee..00000000
--- a/tools/make_test_data.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command line tool for creating test data for Ceilometer.
-
-Usage:
-
-Generate testing data for e.g. for default time span
-
-source .tox/py27/bin/activate
-./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util
---volume 20
-"""
-import argparse
-import datetime
-import random
-import uuid
-
-from oslo_config import cfg
-from oslo_utils import timeutils
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer import storage
-
-
-def make_test_data(name, meter_type, unit, volume, random_min,
-                   random_max, user_id, project_id, resource_id, start,
-                   end, interval, resource_metadata=None, source='artificial'):
-    resource_metadata = resource_metadata or {'display_name': 'toto',
-                                              'host': 'tata',
-                                              'image_ref': 'test',
-                                              'instance_flavor_id': 'toto',
-                                              'server_group': 'toto',
-                                              }
-    # Compute start and end timestamps for the new data.
-    if isinstance(start, datetime.datetime):
-        timestamp = start
-    else:
-        timestamp = timeutils.parse_strtime(start)
-
-    if not isinstance(end, datetime.datetime):
-        end = timeutils.parse_strtime(end)
-
-    increment = datetime.timedelta(minutes=interval)
-
-    print('Adding new samples for meter %s.' % (name))
-    # Generate samples
-    n = 0
-    total_volume = volume
-    while timestamp <= end:
-        if (random_min >= 0 and random_max >= 0):
-            # If there is a random element defined, we will add it to
-            # user given volume.
-            if isinstance(random_min, int) and isinstance(random_max, int):
-                total_volume += random.randint(random_min, random_max)
-            else:
-                total_volume += random.uniform(random_min, random_max)
-
-        c = sample.Sample(name=name,
-                          type=meter_type,
-                          unit=unit,
-                          volume=total_volume,
-                          user_id=user_id,
-                          project_id=project_id,
-                          resource_id=resource_id,
-                          timestamp=timestamp.isoformat(),
-                          resource_metadata=resource_metadata,
-                          source=source,
-                          )
-        data = utils.meter_message_from_counter(
-            c, cfg.CONF.publisher.telemetry_secret)
-        # timestamp should be string when calculating signature, but should be
-        # datetime object when calling record_metering_data.
-        data['timestamp'] = timestamp
-        yield data
-        n += 1
-        timestamp = timestamp + increment
-
-        if (meter_type == 'gauge' or meter_type == 'delta'):
-            # For delta and gauge, we don't want to increase the value
-            # in time by random element. So we always set it back to
-            # volume.
-            total_volume = volume
-
-    print('Added %d new samples for meter %s.' % (n, name))
-
-
-def record_test_data(conn, *args, **kwargs):
-    for data in make_test_data(*args, **kwargs):
-        conn.record_metering_data(data)
-
-
-def get_parser():
-    parser = argparse.ArgumentParser(
-        description='generate metering data',
-    )
-    parser.add_argument(
-        '--interval',
-        default=10,
-        type=int,
-        help='The period between samples, in minutes.',
-    )
-    parser.add_argument(
-        '--start',
-        default=31,
-        help='Number of days to be stepped back from now or date in the past ('
-             '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.',
-    )
-    parser.add_argument(
-        '--end',
-        default=2,
-        help='Number of days to be stepped forward from now or date in the '
-             'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end '
-             'range.',
-    )
-    parser.add_argument(
-        '--type',
-        choices=('gauge', 'cumulative'),
-        default='gauge',
-        dest='meter_type',
-        help='Counter type.',
-    )
-    parser.add_argument(
-        '--unit',
-        default=None,
-        help='Counter unit.',
-    )
-    parser.add_argument(
-        '--project',
-        dest='project_id',
-        help='Project id of owner.',
-    )
-    parser.add_argument(
-        '--user',
-        dest='user_id',
-        help='User id of owner.',
-    )
-    parser.add_argument(
-        '--random_min',
-        help='The random min border of amount for added to given volume.',
-        type=int,
-        default=0,
-    )
-    parser.add_argument(
-        '--random_max',
-        help='The random max border of amount for added to given volume.',
-        type=int,
-        default=0,
-    )
-    parser.add_argument(
-        '--resource',
-        dest='resource_id',
-        default=str(uuid.uuid4()),
-        help='The resource id for the meter data.',
-    )
-    parser.add_argument(
-        '--counter',
-        default='instance',
-        dest='name',
-        help='The counter name for the meter data.',
-    )
-    parser.add_argument(
-        '--volume',
-        help='The amount to attach to the meter.',
-        type=int,
-        default=1,
-    )
-    return parser
-
-
-def main():
-    cfg.CONF([], project='ceilometer')
-
-    args = get_parser().parse_args()
-
-    # Connect to the metering database
-    conn = storage.get_connection_from_config(cfg.CONF)
-
-    # Find the user and/or project for a real resource
-    if not (args.user_id or args.project_id):
-        for r in conn.get_resources():
-            if r.resource_id == args.resource_id:
-                args.user_id = r.user_id
-                args.project_id = r.project_id
-                break
-
-    # Compute the correct time span
-    format = '%Y-%m-%dT%H:%M:%S'
-
-    try:
-        start = datetime.datetime.utcnow() - datetime.timedelta(
-            days=int(args.start))
-    except ValueError:
-        try:
-            start = datetime.datetime.strptime(args.start, format)
-        except ValueError:
-            raise
-
-    try:
-        end = datetime.datetime.utcnow() + datetime.timedelta(
-            days=int(args.end))
-    except ValueError:
-        try:
-            end = datetime.datetime.strptime(args.end, format)
-        except ValueError:
-            raise
-    args.start = start
-    args.end = end
-    record_test_data(conn=conn, **args.__dict__)
-
-    return 0
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tools/make_test_data.sh b/tools/make_test_data.sh
deleted file mode 100755
index 23a93e88..00000000
--- a/tools/make_test_data.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-
-bindir=$(dirname $0)
-
-project_name="$1"
-if [ -z "$project_name" ]
-then
-    project_name=demo
-fi
-
-if [ -z "$OS_USERNAME" ]
-then
-    user=demo
-else
-    user=$OS_USERNAME
-fi
-
-# Convert a possible project name to an id, if we have
-# openstack cli installed.
-if which openstack >/dev/null
-then
-    project=$(openstack project show "$project_name" -c id -f value)
-else
-    # Assume they gave us the project id as argument.
-    project="$project_name"
-fi
-
-if [ -z "$project" ]
-then
-    echo "Could not determine project id for \"$project_name\"" 1>&2
-    exit 1
-fi
-
-early1="2012-08-27T07:00:00"
-early2="2012-08-27T17:00:00"
-
-start="2012-08-28T00:00:00"
-
-middle1="2012-08-28T08:00:00"
-middle2="2012-08-28T18:00:00"
-middle3="2012-08-29T09:00:00"
-middle4="2012-08-29T19:00:00"
-
-end="2012-08-31T23:59:00"
-
-late1="2012-08-31T10:00:00"
-late2="2012-08-31T20:00:00"
-
-mkdata() {
-    ${bindir}/make_test_data.py --project "$project" \
-        --user "$user" --start "$2" --end "$3" \
-        --resource "$1" --counter instance --volume 1
-}
-
-dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2)
-
-echo $project
-
-for i in $(seq 0 $((${#dates[@]} - 2)) )
-do
-
-    iname=${dates[$i]}
-    eval "ivalue=\$$iname"
-
-    for j in $(seq $((i + 1)) $((${#dates[@]} - 1)) )
-    do
-        jname=${dates[$j]}
-        eval "jvalue=\$$jname"
-
-        resource_id="${project_name}-$iname-$jname"
-        echo "$resource_id"
-
-        mkdata "$resource_id" "$ivalue" "$jvalue"
-        [ $? -eq 0 ] || exit $?
-    done
-    echo
-done
diff --git a/tools/make_test_event_data.py b/tools/make_test_event_data.py
index 1df6db4f..d665bc87 100755
--- a/tools/make_test_event_data.py
+++ b/tools/make_test_event_data.py
@@ -99,7 +99,7 @@ def main():
     args = parser.parse_args()

     # Connect to the event database
-    conn = storage.get_connection_from_config(cfg.CONF, 'event')
+    conn = storage.get_connection_from_config(cfg.CONF)

     # Compute the correct time span
     start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start)
diff --git a/tools/send_test_data.py b/tools/send_test_data.py
deleted file mode 100755
index 867c34a4..00000000
--- a/tools/send_test_data.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command line tool for sending test data for Ceilometer via oslo.messaging.
-
-Usage:
-
-Send messages with samples generated by make_test_data
-
-source .tox/py27/bin/activate
-./tools/send_test_data.py --count 1000 --resources_count 10 --topic metering
-"""
-import argparse
-import datetime
-import functools
-import json
-import random
-import uuid
-
-import make_test_data
-from oslo_config import cfg
-import oslo_messaging
-from six import moves
-
-from ceilometer import messaging
-from ceilometer.publisher import utils
-from ceilometer import service
-
-
-def send_batch_notifier(notifier, topic, batch):
-    notifier.sample({}, event_type=topic, payload=batch)
-
-
-def get_notifier(config_file):
-    service.prepare_service(argv=['/', '--config-file', config_file])
-    return oslo_messaging.Notifier(
-        messaging.get_transport(),
-        driver='messagingv2',
-        publisher_id='telemetry.publisher.test',
-        topics=['metering'],
-    )
-
-
-def generate_data(send_batch, make_data_args, samples_count,
-                  batch_size, resources_count, topic):
-    make_data_args.interval = 1
-    make_data_args.start = (datetime.datetime.utcnow() -
-                            datetime.timedelta(minutes=samples_count))
-    make_data_args.end = datetime.datetime.utcnow()
-
-    make_data_args.resource_id = None
-    resources_list = [str(uuid.uuid4())
-                      for _ in moves.xrange(resources_count)]
-    resource_samples = {resource: 0 for resource in resources_list}
-    batch = []
-    count = 0
-    for sample in make_test_data.make_test_data(**make_data_args.__dict__):
-        count += 1
-        resource = resources_list[random.randint(0, len(resources_list) - 1)]
-        resource_samples[resource] += 1
-        sample['resource_id'] = resource
-        # need to change the timestamp from datetime.datetime type to iso
-        # format (unicode type), because collector will change iso format
-        # timestamp to datetime.datetime type before recording to db.
-        sample['timestamp'] = sample['timestamp'].isoformat()
-        # need to recalculate signature because of the resource_id change
-        sig = utils.compute_signature(sample,
-                                      cfg.CONF.publisher.telemetry_secret)
-        sample['message_signature'] = sig
-        batch.append(sample)
-        if len(batch) == batch_size:
-            send_batch(topic, batch)
-            batch = []
-        if count == samples_count:
-            send_batch(topic, batch)
-            return resource_samples
-    send_batch(topic, batch)
-    return resource_samples
-
-
-def get_parser():
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        '--batch-size',
-        dest='batch_size',
-        type=int,
-        default=100
-    )
-    parser.add_argument(
-        '--config-file',
-        default='/etc/ceilometer/ceilometer.conf'
-    )
-    parser.add_argument(
-        '--topic',
-        default='perfmetering'
-    )
-    parser.add_argument(
-        '--samples-count',
-        dest='samples_count',
-        type=int,
-        default=1000
-    )
-    parser.add_argument(
-        '--resources-count',
-        dest='resources_count',
-        type=int,
-        default=100
-    )
-    parser.add_argument(
-        '--result-directory',
-        dest='result_dir',
-        default='/tmp'
-    )
-    return parser
-
-
-def main():
-    args = get_parser().parse_known_args()[0]
-    make_data_args = make_test_data.get_parser().parse_known_args()[0]
-    notifier = get_notifier(args.config_file)
-    send_batch = functools.partial(send_batch_notifier, notifier)
-    result_dir = args.result_dir
-    del args.notify
-    del args.config_file
-    del args.result_dir
-
-    resource_writes = generate_data(send_batch, make_data_args,
-                                    **args.__dict__)
-    result_file = "%s/sample-by-resource-%s" % (result_dir,
-                                                random.getrandbits(32))
-    with open(result_file, 'w') as f:
-        f.write(json.dumps(resource_writes))
-    return result_file
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tools/show_data.py b/tools/show_data.py
deleted file mode 100755
index 754dddcc..00000000
--- a/tools/show_data.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 New Dream Network (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import sys
-
-from oslo_config import cfg
-import six
-
-from ceilometer import storage
-
-
-def show_users(db, args):
-    for u in sorted(db.get_users()):
-        print(u)
-
-
-def show_resources(db, args):
-    if args:
-        users = args
-    else:
-        users = sorted(db.get_users())
-    for u in users:
-        print(u)
-        for resource in db.get_resources(user=u):
-            print(' %(resource_id)s %(timestamp)s' % resource)
-            for k, v in sorted(six.iteritems(resource['metadata'])):
-                print(' %-10s : %s' % (k, v))
-            for meter in resource['meter']:
-                totals = db.get_statistics(storage.SampleFilter(
-                    user=u,
-                    meter=meter['counter_name'],
-                    resource=resource['resource_id'],
-                ))
-                # FIXME(dhellmann): Need a way to tell whether to use
-                # max() or sum() by meter name without hard-coding.
-                if meter['counter_name'] in ['cpu', 'disk']:
-                    value = totals[0]['max']
-                else:
-                    value = totals[0]['sum']
-                print(' %s (%s): %s' %
-                      (meter['counter_name'], meter['counter_type'], value))
-
-
-def show_total_resources(db, args):
-    if args:
-        users = args
-    else:
-        users = sorted(db.get_users())
-    for u in users:
-        print(u)
-        for meter in ['disk', 'cpu', 'instance']:
-            stats = db.get_statistics(storage.SampleFilter(
-                user=u,
-                meter=meter,
-            ))
-            if meter in ['cpu', 'disk']:
-                total = stats['max']
-            else:
-                total = stats['sum']
-            print(' ', meter, total)
-
-
-def show_raw(db, args):
-    fmt = ' %(timestamp)s %(counter_name)10s %(counter_volume)s'
-    for u in sorted(db.get_users()):
-        print(u)
-        for resource in db.get_resources(user=u):
-            print(' ', resource['resource_id'])
-            for sample in db.get_samples(storage.SampleFilter(
-                    user=u,
-                    resource=resource['resource_id'],
-            )):
-                print(fmt % sample)
-
-
-def show_help(db, args):
-    print('COMMANDS:')
-    for name in sorted(COMMANDS.keys()):
-        print(name)
-
-
-def show_projects(db, args):
-    for u in sorted(db.get_projects()):
-        print(u)
-
-
-COMMANDS = {
-    'users': show_users,
-    'projects': show_projects,
-    'help': show_help,
-    'resources': show_resources,
-    'total_resources': show_total_resources,
-    'raw': show_raw,
-}
-
-
-def main(argv):
-    extra_args = cfg.CONF(
-        sys.argv[1:],
-        # NOTE(dhellmann): Read the configuration file(s) for the
-        # ceilometer collector by default.
-        default_config_files=['/etc/ceilometer/ceilometer.conf'],
-    )
-    db = storage.get_connection_from_config(cfg.CONF)
-    command = extra_args[0] if extra_args else 'help'
-    COMMANDS[command](db, extra_args[1:])
-
-
-if __name__ == '__main__':
-    main(sys.argv)
diff --git a/tools/test_hbase_table_utils.py b/tools/test_hbase_table_utils.py
index 10294e31..0bf370d9 100755
--- a/tools/test_hbase_table_utils.py
+++ b/tools/test_hbase_table_utils.py
@@ -26,14 +26,11 @@ def main(argv):
     url = ("%s?table_prefix=%s" %
            (os.getenv("CEILOMETER_TEST_STORAGE_URL"),
             os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test")))
-    conn = storage.get_connection(url, 'ceilometer.metering.storage')
-    event_conn = storage.get_connection(url, 'ceilometer.event.storage')
+    event_conn = storage.get_connection(url)
     for arg in argv:
         if arg == "--upgrade":
-            conn.upgrade()
             event_conn.upgrade()
         if arg == "--clear":
-            conn.clear()
             event_conn.clear()
diff --git a/tox.ini b/tox.ini
index 51f011d7..e93156ae 100644
--- a/tox.ini
+++ b/tox.ini
@@ -48,18 +48,6 @@ passenv = CEILOMETER_*
 commands = bash -x {toxinidir}/run-functional-tests.sh "{posargs}"

-[testenv:integration]
-setenv = VIRTUAL_ENV={envdir}
-         OS_TEST_PATH=./ceilometer/tests/integration
-         OS_TEST_TIMEOUT=2400
-         GABBI_LIVE_FAIL_IF_NO_TEST=1
-passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* GLANCE_* NOVA_* ADMIN_*
-# FIXME(sileht): run gabbi-run to failfast in case of error because testr
-# doesn't support --failfast, but we loose the testr report.
-commands =
-    bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml'
-# bash -x {toxinidir}/tools/pretty_tox.sh "{posargs}"
-
 # NOTE(chdent): The gabbi tests are also run under the other functional
 # tox targets. This target simply provides a target to directly run just
 # gabbi tests without needing to do discovery across the entire body of